256 files changed, 2930 insertions, 1671 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index ecd35e9d441..feca0758391 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl @@ -46,7 +46,6 @@ <sect1><title>Atomic and pointer manipulation</title> !Iarch/x86/include/asm/atomic.h -!Iarch/x86/include/asm/unaligned.h </sect1> <sect1><title>Delaying, scheduling, and timer routines</title> diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl index a20c6f6fffc..6899f471fb1 100644 --- a/Documentation/DocBook/kernel-api.tmpl +++ b/Documentation/DocBook/kernel-api.tmpl @@ -57,7 +57,6 @@ </para> <sect1><title>String Conversions</title> -!Ilib/vsprintf.c !Elib/vsprintf.c </sect1> <sect1><title>String Manipulation</title> diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt new file mode 100644 index 00000000000..e578feed6d8 --- /dev/null +++ b/Documentation/block/cfq-iosched.txt @@ -0,0 +1,45 @@ +CFQ ioscheduler tunables +======================== + +slice_idle +---------- +This specifies how long CFQ should idle for next request on certain cfq queues +(for sequential workloads) and service trees (for random workloads) before +queue is expired and CFQ selects next queue to dispatch from. + +By default slice_idle is a non-zero value. That means by default we idle on +queues/service trees. This can be very helpful on highly seeky media like +single spindle SATA/SAS disks where we can cut down on overall number of +seeks and see improved throughput. + +Setting slice_idle to 0 will remove all the idling on queues/service tree +level and one should see an overall improved throughput on faster storage +devices like multiple SATA/SAS disks in hardware RAID configuration. The down +side is that isolation provided from WRITES also goes down and notion of +IO priority becomes weaker. + +So depending on storage and workload, it might be useful to set slice_idle=0. +In general I think for SATA/SAS disks and software RAID of SATA/SAS disks +keeping slice_idle enabled should be useful. For any configurations where +there are multiple spindles behind single LUN (Host based hardware RAID +controller or for storage arrays), setting slice_idle=0 might end up in better +throughput and acceptable latencies. + +CFQ IOPS Mode for group scheduling +=================================== +Basic CFQ design is to provide priority based time slices. Higher priority +process gets bigger time slice and lower priority process gets smaller time +slice. Measuring time becomes harder if storage is fast and supports NCQ and +it would be better to dispatch multiple requests from multiple cfq queues in +request queue at a time. In such scenario, it is not possible to measure time +consumed by single queue accurately. + +What is possible though is to measure number of requests dispatched from a +single queue and also allow dispatch from multiple cfq queue at the same time. +This effectively becomes the fairness in terms of IOPS (IO operations per +second). + +If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches +to IOPS mode and starts providing fairness in terms of number of requests +dispatched. Note that this mode switching takes effect only for group +scheduling. For non-cgroup users nothing should change. 
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt index 48e0b21b005..6919d62591d 100644 --- a/Documentation/cgroups/blkio-controller.txt +++ b/Documentation/cgroups/blkio-controller.txt @@ -217,6 +217,7 @@ Details of cgroup files CFQ sysfs tunable ================= /sys/block/<disk>/queue/iosched/group_isolation +----------------------------------------------- If group_isolation=1, it provides stronger isolation between groups at the expense of throughput. By default group_isolation is 0. In general that @@ -243,6 +244,33 @@ By default one should run with group_isolation=0. If that is not sufficient and one wants stronger isolation between groups, then set group_isolation=1 but this will come at cost of reduced throughput. +/sys/block/<disk>/queue/iosched/slice_idle +------------------------------------------ +On a faster hardware CFQ can be slow, especially with sequential workload. +This happens because CFQ idles on a single queue and single queue might not +drive deeper request queue depths to keep the storage busy. In such scenarios +one can try setting slice_idle=0 and that would switch CFQ to IOPS +(IO operations per second) mode on NCQ supporting hardware. + +That means CFQ will not idle between cfq queues of a cfq group and hence be +able to driver higher queue depth and achieve better throughput. That also +means that cfq provides fairness among groups in terms of IOPS and not in +terms of disk time. + +/sys/block/<disk>/queue/iosched/group_idle +------------------------------------------ +If one disables idling on individual cfq queues and cfq service trees by +setting slice_idle=0, group_idle kicks in. That means CFQ will still idle +on the group in an attempt to provide fairness among groups. + +By default group_idle is same as slice_idle and does not do anything if +slice_idle is enabled. + +One can experience an overall throughput drop if you have created multiple +groups and put applications in that group which are not driving enough +IO to keep disk busy. In that case set group_idle=0, and CFQ will not idle +on individual groups and throughput should improve. + What works ========== - Currently only sync IO queues are support. All the buffered writes are diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt index 27a52b35d55..3d8a97747f7 100644 --- a/Documentation/kernel-doc-nano-HOWTO.txt +++ b/Documentation/kernel-doc-nano-HOWTO.txt @@ -345,5 +345,10 @@ documentation, in <filename>, for the functions listed. section titled <section title> from <filename>. Spaces are allowed in <section title>; do not quote the <section title>. +!C<filename> is replaced by nothing, but makes the tools check that +all DOC: sections and documented functions, symbols, etc. are used. +This makes sense to use when you use !F/!P only and want to verify +that all documentation is included. + Tim. */ <twaugh@redhat.com> diff --git a/Documentation/power/regulator/overview.txt b/Documentation/power/regulator/overview.txt index 9363e056188..8ed17587a74 100644 --- a/Documentation/power/regulator/overview.txt +++ b/Documentation/power/regulator/overview.txt @@ -13,7 +13,7 @@ regulators (where voltage output is controllable) and current sinks (where current limit is controllable). (C) 2008 Wolfson Microelectronics PLC. 
-Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> +Author: Liam Girdwood <lrg@slimlogic.co.uk> Nomenclature diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index ce46fa1e643..37c6aad5e59 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -296,6 +296,7 @@ Conexant 5051 Conexant 5066 ============= laptop Basic Laptop config (default) + hp-laptop HP laptops, e g G60 dell-laptop Dell laptops dell-vostro Dell Vostro olpc-xo-1_5 OLPC XO 1.5 diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt new file mode 100644 index 00000000000..e4498a2872c --- /dev/null +++ b/Documentation/workqueue.txt @@ -0,0 +1,380 @@ + +Concurrency Managed Workqueue (cmwq) + +September, 2010 Tejun Heo <tj@kernel.org> + Florian Mickler <florian@mickler.org> + +CONTENTS + +1. Introduction +2. Why cmwq? +3. The Design +4. Application Programming Interface (API) +5. Example Execution Scenarios +6. Guidelines + + +1. Introduction + +There are many cases where an asynchronous process execution context +is needed and the workqueue (wq) API is the most commonly used +mechanism for such cases. + +When such an asynchronous execution context is needed, a work item +describing which function to execute is put on a queue. An +independent thread serves as the asynchronous execution context. The +queue is called workqueue and the thread is called worker. + +While there are work items on the workqueue the worker executes the +functions associated with the work items one after the other. When +there is no work item left on the workqueue the worker becomes idle. +When a new work item gets queued, the worker begins executing again. + + +2. Why cmwq? + +In the original wq implementation, a multi threaded (MT) wq had one +worker thread per CPU and a single threaded (ST) wq had one worker +thread system-wide. A single MT wq needed to keep around the same +number of workers as the number of CPUs. The kernel grew a lot of MT +wq users over the years and with the number of CPU cores continuously +rising, some systems saturated the default 32k PID space just booting +up. + +Although MT wq wasted a lot of resource, the level of concurrency +provided was unsatisfactory. The limitation was common to both ST and +MT wq albeit less severe on MT. Each wq maintained its own separate +worker pool. A MT wq could provide only one execution context per CPU +while a ST wq one for the whole system. Work items had to compete for +those very limited execution contexts leading to various problems +including proneness to deadlocks around the single execution context. + +The tension between the provided level of concurrency and resource +usage also forced its users to make unnecessary tradeoffs like libata +choosing to use ST wq for polling PIOs and accepting an unnecessary +limitation that no two polling PIOs can progress at the same time. As +MT wq don't provide much better concurrency, users which require +higher level of concurrency, like async or fscache, had to implement +their own thread pool. + +Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with +focus on the following goals. + +* Maintain compatibility with the original workqueue API. + +* Use per-CPU unified worker pools shared by all wq to provide + flexible level of concurrency on demand without wasting a lot of + resource. 
+ +* Automatically regulate worker pool and level of concurrency so that + the API users don't need to worry about such details. + + +3. The Design + +In order to ease the asynchronous execution of functions a new +abstraction, the work item, is introduced. + +A work item is a simple struct that holds a pointer to the function +that is to be executed asynchronously. Whenever a driver or subsystem +wants a function to be executed asynchronously it has to set up a work +item pointing to that function and queue that work item on a +workqueue. + +Special purpose threads, called worker threads, execute the functions +off of the queue, one after the other. If no work is queued, the +worker threads become idle. These worker threads are managed in so +called thread-pools. + +The cmwq design differentiates between the user-facing workqueues that +subsystems and drivers queue work items on and the backend mechanism +which manages thread-pool and processes the queued work items. + +The backend is called gcwq. There is one gcwq for each possible CPU +and one gcwq to serve work items queued on unbound workqueues. + +Subsystems and drivers can create and queue work items through special +workqueue API functions as they see fit. They can influence some +aspects of the way the work items are executed by setting flags on the +workqueue they are putting the work item on. These flags include +things like CPU locality, reentrancy, concurrency limits and more. To +get a detailed overview refer to the API description of +alloc_workqueue() below. + +When a work item is queued to a workqueue, the target gcwq is +determined according to the queue parameters and workqueue attributes +and appended on the shared worklist of the gcwq. For example, unless +specifically overridden, a work item of a bound workqueue will be +queued on the worklist of exactly that gcwq that is associated to the +CPU the issuer is running on. + +For any worker pool implementation, managing the concurrency level +(how many execution contexts are active) is an important issue. cmwq +tries to keep the concurrency at a minimal but sufficient level. +Minimal to save resources and sufficient in that the system is used at +its full capacity. + +Each gcwq bound to an actual CPU implements concurrency management by +hooking into the scheduler. The gcwq is notified whenever an active +worker wakes up or sleeps and keeps track of the number of the +currently runnable workers. Generally, work items are not expected to +hog a CPU and consume many cycles. That means maintaining just enough +concurrency to prevent work processing from stalling should be +optimal. As long as there are one or more runnable workers on the +CPU, the gcwq doesn't start execution of a new work, but, when the +last running worker goes to sleep, it immediately schedules a new +worker so that the CPU doesn't sit idle while there are pending work +items. This allows using a minimal number of workers without losing +execution bandwidth. + +Keeping idle workers around doesn't cost other than the memory space +for kthreads, so cmwq holds onto idle ones for a while before killing +them. + +For an unbound wq, the above concurrency management doesn't apply and +the gcwq for the pseudo unbound CPU tries to start executing all work +items as soon as possible. The responsibility of regulating +concurrency level is on the users. There is also a flag to mark a +bound wq to ignore the concurrency management. Please refer to the +API section for details. 
+ +Forward progress guarantee relies on that workers can be created when +more execution contexts are necessary, which in turn is guaranteed +through the use of rescue workers. All work items which might be used +on code paths that handle memory reclaim are required to be queued on +wq's that have a rescue-worker reserved for execution under memory +pressure. Else it is possible that the thread-pool deadlocks waiting +for execution contexts to free up. + + +4. Application Programming Interface (API) + +alloc_workqueue() allocates a wq. The original create_*workqueue() +functions are deprecated and scheduled for removal. alloc_workqueue() +takes three arguments - @name, @flags and @max_active. @name is the +name of the wq and also used as the name of the rescuer thread if +there is one. + +A wq no longer manages execution resources but serves as a domain for +forward progress guarantee, flush and work item attributes. @flags +and @max_active control how work items are assigned execution +resources, scheduled and executed. + +@flags: + + WQ_NON_REENTRANT + + By default, a wq guarantees non-reentrance only on the same + CPU. A work item may not be executed concurrently on the same + CPU by multiple workers but is allowed to be executed + concurrently on multiple CPUs. This flag makes sure + non-reentrance is enforced across all CPUs. Work items queued + to a non-reentrant wq are guaranteed to be executed by at most + one worker system-wide at any given time. + + WQ_UNBOUND + + Work items queued to an unbound wq are served by a special + gcwq which hosts workers which are not bound to any specific + CPU. This makes the wq behave as a simple execution context + provider without concurrency management. The unbound gcwq + tries to start execution of work items as soon as possible. + Unbound wq sacrifices locality but is useful for the following + cases. + + * Wide fluctuation in the concurrency level requirement is + expected and using bound wq may end up creating large number + of mostly unused workers across different CPUs as the issuer + hops through different CPUs. + + * Long running CPU intensive workloads which can be better + managed by the system scheduler. + + WQ_FREEZEABLE + + A freezeable wq participates in the freeze phase of the system + suspend operations. Work items on the wq are drained and no + new work item starts execution until thawed. + + WQ_RESCUER + + All wq which might be used in the memory reclaim paths _MUST_ + have this flag set. This reserves one worker exclusively for + the execution of this wq under memory pressure. + + WQ_HIGHPRI + + Work items of a highpri wq are queued at the head of the + worklist of the target gcwq and start execution regardless of + the current concurrency level. In other words, highpri work + items will always start execution as soon as execution + resource is available. + + Ordering among highpri work items is preserved - a highpri + work item queued after another highpri work item will start + execution after the earlier highpri work item starts. + + Although highpri work items are not held back by other + runnable work items, they still contribute to the concurrency + level. Highpri work items in runnable state will prevent + non-highpri work items from starting execution. + + This flag is meaningless for unbound wq. + + WQ_CPU_INTENSIVE + + Work items of a CPU intensive wq do not contribute to the + concurrency level. In other words, runnable CPU intensive + work items will not prevent other work items from starting + execution. 
This is useful for bound work items which are + expected to hog CPU cycles so that their execution is + regulated by the system scheduler. + + Although CPU intensive work items don't contribute to the + concurrency level, start of their executions is still + regulated by the concurrency management and runnable + non-CPU-intensive work items can delay execution of CPU + intensive work items. + + This flag is meaningless for unbound wq. + + WQ_HIGHPRI | WQ_CPU_INTENSIVE + + This combination makes the wq avoid interaction with + concurrency management completely and behave as a simple + per-CPU execution context provider. Work items queued on a + highpri CPU-intensive wq start execution as soon as resources + are available and don't affect execution of other work items. + +@max_active: + +@max_active determines the maximum number of execution contexts per +CPU which can be assigned to the work items of a wq. For example, +with @max_active of 16, at most 16 work items of the wq can be +executing at the same time per CPU. + +Currently, for a bound wq, the maximum limit for @max_active is 512 +and the default value used when 0 is specified is 256. For an unbound +wq, the limit is higher of 512 and 4 * num_possible_cpus(). These +values are chosen sufficiently high such that they are not the +limiting factor while providing protection in runaway cases. + +The number of active work items of a wq is usually regulated by the +users of the wq, more specifically, by how many work items the users +may queue at the same time. Unless there is a specific need for +throttling the number of active work items, specifying '0' is +recommended. + +Some users depend on the strict execution ordering of ST wq. The +combination of @max_active of 1 and WQ_UNBOUND is used to achieve this +behavior. Work items on such wq are always queued to the unbound gcwq +and only one work item can be active at any given time thus achieving +the same ordering property as ST wq. + + +5. Example Execution Scenarios + +The following example execution scenarios try to illustrate how cmwq +behave under different configurations. + + Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU. + w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms + again before finishing. w1 and w2 burn CPU for 5ms then sleep for + 10ms. + +Ignoring all other tasks, works and processing overhead, and assuming +simple FIFO scheduling, the following is one highly simplified version +of possible sequences of events with the original wq. 
+ + TIME IN MSECS EVENT + 0 w0 starts and burns CPU + 5 w0 sleeps + 15 w0 wakes up and burns CPU + 20 w0 finishes + 20 w1 starts and burns CPU + 25 w1 sleeps + 35 w1 wakes up and finishes + 35 w2 starts and burns CPU + 40 w2 sleeps + 50 w2 wakes up and finishes + +And with cmwq with @max_active >= 3, + + TIME IN MSECS EVENT + 0 w0 starts and burns CPU + 5 w0 sleeps + 5 w1 starts and burns CPU + 10 w1 sleeps + 10 w2 starts and burns CPU + 15 w2 sleeps + 15 w0 wakes up and burns CPU + 20 w0 finishes + 20 w1 wakes up and finishes + 25 w2 wakes up and finishes + +If @max_active == 2, + + TIME IN MSECS EVENT + 0 w0 starts and burns CPU + 5 w0 sleeps + 5 w1 starts and burns CPU + 10 w1 sleeps + 15 w0 wakes up and burns CPU + 20 w0 finishes + 20 w1 wakes up and finishes + 20 w2 starts and burns CPU + 25 w2 sleeps + 35 w2 wakes up and finishes + +Now, let's assume w1 and w2 are queued to a different wq q1 which has +WQ_HIGHPRI set, + + TIME IN MSECS EVENT + 0 w1 and w2 start and burn CPU + 5 w1 sleeps + 10 w2 sleeps + 10 w0 starts and burns CPU + 15 w0 sleeps + 15 w1 wakes up and finishes + 20 w2 wakes up and finishes + 25 w0 wakes up and burns CPU + 30 w0 finishes + +If q1 has WQ_CPU_INTENSIVE set, + + TIME IN MSECS EVENT + 0 w0 starts and burns CPU + 5 w0 sleeps + 5 w1 and w2 start and burn CPU + 10 w1 sleeps + 15 w2 sleeps + 15 w0 wakes up and burns CPU + 20 w0 finishes + 20 w1 wakes up and finishes + 25 w2 wakes up and finishes + + +6. Guidelines + +* Do not forget to use WQ_RESCUER if a wq may process work items which + are used during memory reclaim. Each wq with WQ_RESCUER set has one + rescuer thread reserved for it. If there is dependency among + multiple work items used during memory reclaim, they should be + queued to separate wq each with WQ_RESCUER. + +* Unless strict ordering is required, there is no need to use ST wq. + +* Unless there is a specific need, using 0 for @max_active is + recommended. In most use cases, concurrency level usually stays + well under the default limit. + +* A wq serves as a domain for forward progress guarantee (WQ_RESCUER), + flush and work item attributes. Work items which are not involved + in memory reclaim and don't need to be flushed as a part of a group + of work items, and don't require any special attribute, can use one + of the system wq. There is no difference in execution + characteristics between using a dedicated wq and a system wq. + +* Unless work items are expected to consume a huge amount of CPU + cycles, using a bound wq is usually beneficial due to the increased + level of locality in wq operations and work item execution. diff --git a/MAINTAINERS b/MAINTAINERS index 9800de5ec22..e7c528ff101 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1445,6 +1445,16 @@ S: Maintained F: Documentation/video4linux/cafe_ccic F: drivers/media/video/cafe_ccic* +CAIF NETWORK LAYER +M: Sjur Braendeland <sjur.brandeland@stericsson.com> +L: netdev@vger.kernel.org +S: Supported +F: Documentation/networking/caif/ +F: drivers/net/caif/ +F: include/linux/caif/ +F: include/net/caif/ +F: net/caif/ + CALGARY x86-64 IOMMU M: Muli Ben-Yehuda <muli@il.ibm.com> M: "Jon D. 
Mason" <jdmason@kudzu.us> @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 36 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4 NAME = Sheep on Meth # *DOCUMENTATION* diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c index 753c0d31a3d..c67b47f1c0f 100644 --- a/arch/arm/mach-at91/at91sam9g45.c +++ b/arch/arm/mach-at91/at91sam9g45.c @@ -121,8 +121,8 @@ static struct clk ssc1_clk = { .pmc_mask = 1 << AT91SAM9G45_ID_SSC1, .type = CLK_TYPE_PERIPHERAL, }; -static struct clk tcb_clk = { - .name = "tcb_clk", +static struct clk tcb0_clk = { + .name = "tcb0_clk", .pmc_mask = 1 << AT91SAM9G45_ID_TCB, .type = CLK_TYPE_PERIPHERAL, }; @@ -192,6 +192,14 @@ static struct clk ohci_clk = { .parent = &uhphs_clk, }; +/* One additional fake clock for second TC block */ +static struct clk tcb1_clk = { + .name = "tcb1_clk", + .pmc_mask = 0, + .type = CLK_TYPE_PERIPHERAL, + .parent = &tcb0_clk, +}; + static struct clk *periph_clocks[] __initdata = { &pioA_clk, &pioB_clk, @@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = { &spi1_clk, &ssc0_clk, &ssc1_clk, - &tcb_clk, + &tcb0_clk, &pwm_clk, &tsc_clk, &dma_clk, @@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = { &mmc1_clk, // irq0 &ohci_clk, + &tcb1_clk, }; /* diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 809114d5a5a..5e71ccd5e7d 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c @@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = { .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1, .flags = IORESOURCE_MEM, }, - [2] = { + [1] = { .start = AT91SAM9G45_ID_DMA, .end = AT91SAM9G45_ID_DMA, .flags = IORESOURCE_IRQ, @@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = { static void __init at91_add_device_tc(void) { /* this chip has one clock and irq for all six TC channels */ - at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk"); + at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk"); platform_device_register(&at91sam9g45_tcb0_device); - at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk"); + at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk"); platform_device_register(&at91sam9g45_tcb1_device); } #else diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c index c4c8865d52d..65eb0943194 100644 --- a/arch/arm/mach-at91/board-sam9261ek.c +++ b/arch/arm/mach-at91/board-sam9261ek.c @@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = { .start = AT91_PIN_PC11, .end = AT91_PIN_PC11, .flags = IORESOURCE_IRQ + | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE, } }; static struct dm9000_plat_data dm9000_platdata = { - .flags = DM9000_PLATF_16BITONLY, + .flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM, }; static struct platform_device dm9000_device = { @@ -168,17 +169,6 @@ static struct at91_udc_data __initdata ek_udc_data = { /* - * MCI (SD/MMC) - */ -static struct at91_mmc_data __initdata ek_mmc_data = { - .wire4 = 1, -// .det_pin = ... not connected -// .wp_pin = ... not connected -// .vcc_pin = ... 
not connected -}; - - -/* * NAND flash */ static struct mtd_partition __initdata ek_nand_partition[] = { @@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void) at91_add_device_nand(&ek_nand_data); } +/* + * SPI related devices + */ +#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE) /* * ADS7846 Touchscreen @@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = { #endif }; +#else /* CONFIG_SPI_ATMEL_* */ +/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */ + +/* + * MCI (SD/MMC) + * det_pin, wp_pin and vcc_pin are not connected + */ +static struct at91_mmc_data __initdata ek_mmc_data = { + .wire4 = 1, +}; + +#endif /* CONFIG_SPI_ATMEL_* */ + /* * LCD Controller diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c index 7f7da439341..7525cee3983 100644 --- a/arch/arm/mach-at91/clock.c +++ b/arch/arm/mach-at91/clock.c @@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init); int __init clk_register(struct clk *clk) { if (clk_is_peripheral(clk)) { - clk->parent = &mck; + if (!clk->parent) + clk->parent = &mck; clk->mode = pmc_periph_mode; list_add_tail(&clk->node, &clocks); } diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h index f90edc85b50..9301a282161 100644 --- a/arch/ia64/include/asm/compat.h +++ b/arch/ia64/include/asm/compat.h @@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr) } static __inline__ void __user * -compat_alloc_user_space (long len) +arch_compat_alloc_user_space (long len) { struct pt_regs *regs = task_pt_regs(current); return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len); diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 3567d54f8ce..331d42bda77 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -420,22 +420,31 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set ;; RSM_PSR_I(p0, r18, r19) // mask interrupt delivery - mov ar.ccv=0 andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP + mov r8=EINVAL // default to EINVAL #ifdef CONFIG_SMP - mov r17=1 + // __ticket_spin_trylock(r31) + ld4 r17=[r31] ;; - cmpxchg4.acq r18=[r31],r17,ar.ccv // try to acquire the lock - mov r8=EINVAL // default to EINVAL + mov.m ar.ccv=r17 + extr.u r9=r17,17,15 + adds r19=1,r17 + extr.u r18=r17,0,15 + ;; + cmp.eq p6,p7=r9,r18 ;; +(p6) cmpxchg4.acq r9=[r31],r19,ar.ccv +(p6) dep.z r20=r19,1,15 // next serving ticket for unlock +(p7) br.cond.spnt.many .lock_contention + ;; + cmp4.eq p0,p7=r9,r17 + adds r31=2,r31 +(p7) br.cond.spnt.many .lock_contention ld8 r3=[r2] // re-read current->blocked now that we hold the lock - cmp4.ne p6,p0=r18,r0 -(p6) br.cond.spnt.many .lock_contention ;; #else ld8 r3=[r2] // re-read current->blocked now that we hold the lock - mov r8=EINVAL // default to EINVAL #endif add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16 add r19=IA64_TASK_SIGNAL_OFFSET,r16 @@ -490,7 +499,9 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set (p6) br.cond.spnt.few 1b // yes -> retry #ifdef CONFIG_SMP - st4.rel [r31]=r0 // release the lock + // __ticket_spin_unlock(r31) + st2.rel [r31]=r20 + mov r20=0 // i must not leak kernel bits... 
#endif SSM_PSR_I(p0, p9, r31) ;; @@ -512,7 +523,8 @@ EX(.fail_efault, (p15) st8 [r34]=r3) .sig_pending: #ifdef CONFIG_SMP - st4.rel [r31]=r0 // release the lock + // __ticket_spin_unlock(r31) + st2.rel [r31]=r20 // release the lock #endif SSM_PSR_I(p0, p9, r17) ;; diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 60b15d0aa07..b43b36beafe 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -340,10 +340,13 @@ #define __NR_set_thread_area 334 #define __NR_atomic_cmpxchg_32 335 #define __NR_atomic_barrier 336 +#define __NR_fanotify_init 337 +#define __NR_fanotify_mark 338 +#define __NR_prlimit64 339 #ifdef __KERNEL__ -#define NR_syscalls 337 +#define NR_syscalls 340 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index 2391bdff099..6360c437dcf 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S @@ -765,4 +765,7 @@ sys_call_table: .long sys_set_thread_area .long sys_atomic_cmpxchg_32 /* 335 */ .long sys_atomic_barrier + .long sys_fanotify_init + .long sys_fanotify_mark + .long sys_prlimit64 diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S index b30b3eb197a..79b1ed198c0 100644 --- a/arch/m68knommu/kernel/syscalltable.S +++ b/arch/m68knommu/kernel/syscalltable.S @@ -355,6 +355,9 @@ ENTRY(sys_call_table) .long sys_set_thread_area .long sys_atomic_cmpxchg_32 /* 335 */ .long sys_atomic_barrier + .long sys_fanotify_init + .long sys_fanotify_mark + .long sys_prlimit64 .rept NR_syscalls-(.-sys_call_table)/4 .long sys_ni_syscall diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 613f6912dfc..dbc51065df5 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static inline void __user *compat_alloc_user_space(long len) +static inline void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = (struct pt_regs *) ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c index 9d49073e827..db509dd8056 100644 --- a/arch/mn10300/kernel/mn10300-serial.c +++ b/arch/mn10300/kernel/mn10300-serial.c @@ -156,17 +156,17 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = { ._intr = &SC0ICR, ._rxb = &SC0RXB, ._txb = &SC0TXB, - .rx_name = "ttySM0/Rx", - .tx_name = "ttySM0/Tx", + .rx_name = "ttySM0:Rx", + .tx_name = "ttySM0:Tx", #ifdef CONFIG_MN10300_TTYSM0_TIMER8 - .tm_name = "ttySM0/Timer8", + .tm_name = "ttySM0:Timer8", ._tmxmd = &TM8MD, ._tmxbr = &TM8BR, ._tmicr = &TM8ICR, .tm_irq = TM8IRQ, .div_timer = MNSCx_DIV_TIMER_16BIT, #else /* CONFIG_MN10300_TTYSM0_TIMER2 */ - .tm_name = "ttySM0/Timer2", + .tm_name = "ttySM0:Timer2", ._tmxmd = &TM2MD, ._tmxbr = (volatile u16 *) &TM2BR, ._tmicr = &TM2ICR, @@ -209,17 +209,17 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = { ._intr = &SC1ICR, ._rxb = &SC1RXB, ._txb = &SC1TXB, - .rx_name = "ttySM1/Rx", - .tx_name = "ttySM1/Tx", + .rx_name = "ttySM1:Rx", + .tx_name = "ttySM1:Tx", #ifdef CONFIG_MN10300_TTYSM1_TIMER9 - .tm_name = "ttySM1/Timer9", + .tm_name = "ttySM1:Timer9", ._tmxmd = &TM9MD, ._tmxbr = &TM9BR, ._tmicr = &TM9ICR, .tm_irq = TM9IRQ, .div_timer = MNSCx_DIV_TIMER_16BIT, #else /* CONFIG_MN10300_TTYSM1_TIMER3 */ - .tm_name = "ttySM1/Timer3", + .tm_name = "ttySM1:Timer3", 
._tmxmd = &TM3MD, ._tmxbr = (volatile u16 *) &TM3BR, ._tmicr = &TM3ICR, @@ -260,9 +260,9 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = { .uart.lock = __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock), .name = "ttySM2", - .rx_name = "ttySM2/Rx", - .tx_name = "ttySM2/Tx", - .tm_name = "ttySM2/Timer10", + .rx_name = "ttySM2:Rx", + .tx_name = "ttySM2:Tx", + .tm_name = "ttySM2:Timer10", ._iobase = &SC2CTR, ._control = &SC2CTR, ._status = &SC2STR, diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index 02b77baa5da..efa0b60c63f 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static __inline__ void __user *compat_alloc_user_space(long len) +static __inline__ void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = ¤t->thread.regs; return (void __user *)regs->gr[30]; diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 396d21a8005..a11d4eac4f9 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static inline void __user *compat_alloc_user_space(long len) +static inline void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = current->thread.regs; unsigned long usp = regs->gpr[1]; diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 104f2007f09..a875c2f542e 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -181,7 +181,7 @@ static inline int is_compat_task(void) #endif -static inline void __user *compat_alloc_user_space(long len) +static inline void __user *arch_compat_alloc_user_space(long len) { unsigned long stack; diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 5016f76ea98..6f57325bb88 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h @@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static inline void __user *compat_alloc_user_space(long len) +static inline void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = current_thread_info()->kregs; unsigned long usp = regs->u_regs[UREG_I6]; diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 50794137d71..675c9e11ada 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -166,7 +166,6 @@ sparc_breakpoint (struct pt_regs *regs) { siginfo_t info; - lock_kernel(); #ifdef DEBUG_SPARC_BREAKPOINT printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc); #endif @@ -180,7 +179,6 @@ sparc_breakpoint (struct pt_regs *regs) #ifdef DEBUG_SPARC_BREAKPOINT printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc); #endif - unlock_kernel(); } asmlinkage int diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index f8514e291e1..12b9f352595 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -323,7 +323,6 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir; - lock_kernel(); if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || (((insn >> 30) & 3) != 3)) goto kill_user; @@ -377,5 +376,5 @@ asmlinkage void 
user_unaligned_trap(struct pt_regs *regs, unsigned int insn) kill_user: user_mna_trap_fault(regs, insn); out: - unlock_kernel(); + ; } diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c index f24d298bda2..b351770cbdd 100644 --- a/arch/sparc/kernel/windows.c +++ b/arch/sparc/kernel/windows.c @@ -112,7 +112,6 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who) struct thread_info *tp = current_thread_info(); int window; - lock_kernel(); flush_user_windows(); for(window = 0; window < tp->w_saved; window++) { unsigned long sp = tp->rwbuf_stkptrs[window]; @@ -123,5 +122,4 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who) do_exit(SIGILL); } tp->w_saved = 0; - unlock_kernel(); } diff --git a/arch/tile/include/arch/chip_tile64.h b/arch/tile/include/arch/chip_tile64.h index 1246573be59..261aaba092d 100644 --- a/arch/tile/include/arch/chip_tile64.h +++ b/arch/tile/include/arch/chip_tile64.h @@ -150,6 +150,9 @@ /** Is the PROC_STATUS SPR supported? */ #define CHIP_HAS_PROC_STATUS_SPR() 0 +/** Is the DSTREAM_PF SPR supported? */ +#define CHIP_HAS_DSTREAM_PF() 0 + /** Log of the number of mshims we have. */ #define CHIP_LOG_NUM_MSHIMS() 2 diff --git a/arch/tile/include/arch/chip_tilepro.h b/arch/tile/include/arch/chip_tilepro.h index e864c47fc89..70017699a74 100644 --- a/arch/tile/include/arch/chip_tilepro.h +++ b/arch/tile/include/arch/chip_tilepro.h @@ -150,6 +150,9 @@ /** Is the PROC_STATUS SPR supported? */ #define CHIP_HAS_PROC_STATUS_SPR() 1 +/** Is the DSTREAM_PF SPR supported? */ +#define CHIP_HAS_DSTREAM_PF() 0 + /** Log of the number of mshims we have. */ #define CHIP_LOG_NUM_MSHIMS() 2 diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h index 5a34da6cdd7..8b60ec8b2d1 100644 --- a/arch/tile/include/asm/compat.h +++ b/arch/tile/include/asm/compat.h @@ -195,7 +195,7 @@ static inline unsigned long ptr_to_compat_reg(void __user *uptr) return (long)(int)(long __force)uptr; } -static inline void __user *compat_alloc_user_space(long len) +static inline void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = task_pt_regs(current); return (void __user *)regs->sp - len; @@ -214,8 +214,9 @@ extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka, struct compat_sigaction; struct compat_siginfo; struct compat_sigaltstack; -long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, - compat_uptr_t __user *envp); +long compat_sys_execve(const char __user *path, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp); long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, struct compat_sigaction __user *oact, size_t sigsetsize); diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index 8c95bef3fa4..ee43328713a 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h @@ -164,22 +164,22 @@ static inline void _tile_writeq(u64 val, unsigned long addr) #define iowrite32 writel #define iowrite64 writeq -static inline void *memcpy_fromio(void *dst, void *src, int len) +static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, + size_t len) { int x; BUG_ON((unsigned long)src & 0x3); for (x = 0; x < len; x += 4) *(u32 *)(dst + x) = readl(src + x); - return dst; } -static inline void *memcpy_toio(void *dst, void *src, int len) +static inline void memcpy_toio(volatile void __iomem *dst, const void *src, + size_t len) { int x; BUG_ON((unsigned long)dst & 0x3); for (x = 0; x < len; x += 4) writel(*(u32 *)(src + x), dst + 
x); - return dst; } /* diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h index d942d09b252..ccd5f842568 100644 --- a/arch/tile/include/asm/processor.h +++ b/arch/tile/include/asm/processor.h @@ -103,6 +103,18 @@ struct thread_struct { /* Any other miscellaneous processor state bits */ unsigned long proc_status; #endif +#if !CHIP_HAS_FIXED_INTVEC_BASE() + /* Interrupt base for PL0 interrupts */ + unsigned long interrupt_vector_base; +#endif +#if CHIP_HAS_TILE_RTF_HWM() + /* Tile cache retry fifo high-water mark */ + unsigned long tile_rtf_hwm; +#endif +#if CHIP_HAS_DSTREAM_PF() + /* Data stream prefetch control */ + unsigned long dstream_pf; +#endif #ifdef CONFIG_HARDWALL /* Is this task tied to an activated hardwall? */ struct hardwall_info *hardwall; diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h index acdae814e01..4a02bb07397 100644 --- a/arch/tile/include/asm/ptrace.h +++ b/arch/tile/include/asm/ptrace.h @@ -51,10 +51,7 @@ typedef uint_reg_t pt_reg_t; /* * This struct defines the way the registers are stored on the stack during a - * system call/exception. It should be a multiple of 8 bytes to preserve - * normal stack alignment rules. - * - * Must track <sys/ucontext.h> and <sys/procfs.h> + * system call or exception. "struct sigcontext" has the same shape. */ struct pt_regs { /* Saved main processor registers; 56..63 are special. */ @@ -80,11 +77,6 @@ struct pt_regs { #endif /* __ASSEMBLY__ */ -/* Flag bits in pt_regs.flags */ -#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */ -#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */ -#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */ - #define PTRACE_GETREGS 12 #define PTRACE_SETREGS 13 #define PTRACE_GETFPREGS 14 @@ -101,6 +93,11 @@ struct pt_regs { #ifdef __KERNEL__ +/* Flag bits in pt_regs.flags */ +#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */ +#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */ +#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */ + #ifndef __ASSEMBLY__ #define instruction_pointer(regs) ((regs)->pc) diff --git a/arch/tile/include/asm/sigcontext.h b/arch/tile/include/asm/sigcontext.h index 7cd7672e3ad..5e2d03336f5 100644 --- a/arch/tile/include/asm/sigcontext.h +++ b/arch/tile/include/asm/sigcontext.h @@ -15,13 +15,21 @@ #ifndef _ASM_TILE_SIGCONTEXT_H #define _ASM_TILE_SIGCONTEXT_H -/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ -#include <asm/ptrace.h> - -/* Must track <sys/ucontext.h> */ +#include <arch/abi.h> +/* + * struct sigcontext has the same shape as struct pt_regs, + * but is simplified since we know the fault is from userspace. + */ struct sigcontext { - struct pt_regs regs; + uint_reg_t gregs[53]; /* General-purpose registers. */ + uint_reg_t tp; /* Aliases gregs[TREG_TP]. */ + uint_reg_t sp; /* Aliases gregs[TREG_SP]. */ + uint_reg_t lr; /* Aliases gregs[TREG_LR]. */ + uint_reg_t pc; /* Program counter. */ + uint_reg_t ics; /* In Interrupt Critical Section? */ + uint_reg_t faultnum; /* Fault number. 
*/ + uint_reg_t pad[5]; }; #endif /* _ASM_TILE_SIGCONTEXT_H */ diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h index eb0253f3220..c1ee1d61d44 100644 --- a/arch/tile/include/asm/signal.h +++ b/arch/tile/include/asm/signal.h @@ -24,6 +24,7 @@ #include <asm-generic/signal.h> #if defined(__KERNEL__) && !defined(__ASSEMBLY__) +struct pt_regs; int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *); int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); void do_signal(struct pt_regs *regs); diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h index af165a74537..ce99ffefeac 100644 --- a/arch/tile/include/asm/syscalls.h +++ b/arch/tile/include/asm/syscalls.h @@ -62,10 +62,12 @@ long sys_fork(void); long _sys_fork(struct pt_regs *regs); long sys_vfork(void); long _sys_vfork(struct pt_regs *regs); -long sys_execve(char __user *filename, char __user * __user *argv, - char __user * __user *envp); -long _sys_execve(char __user *filename, char __user * __user *argv, - char __user * __user *envp, struct pt_regs *regs); +long sys_execve(const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp); +long _sys_execve(const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp, struct pt_regs *regs); /* kernel/signal.c */ long sys_sigaltstack(const stack_t __user *, stack_t __user *); @@ -86,10 +88,13 @@ int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *); #endif #ifdef CONFIG_COMPAT -long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, - compat_uptr_t __user *envp); -long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, - compat_uptr_t __user *envp, struct pt_regs *regs); +long compat_sys_execve(const char __user *path, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp); +long _compat_sys_execve(const char __user *path, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp, + struct pt_regs *regs); long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, struct compat_sigaltstack __user *uoss_ptr); long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 985cc28c74c..84c29111756 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -408,6 +408,15 @@ static void save_arch_state(struct thread_struct *t) #if CHIP_HAS_PROC_STATUS_SPR() t->proc_status = __insn_mfspr(SPR_PROC_STATUS); #endif +#if !CHIP_HAS_FIXED_INTVEC_BASE() + t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0); +#endif +#if CHIP_HAS_TILE_RTF_HWM() + t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM); +#endif +#if CHIP_HAS_DSTREAM_PF() + t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF); +#endif } static void restore_arch_state(const struct thread_struct *t) @@ -428,14 +437,14 @@ static void restore_arch_state(const struct thread_struct *t) #if CHIP_HAS_PROC_STATUS_SPR() __insn_mtspr(SPR_PROC_STATUS, t->proc_status); #endif +#if !CHIP_HAS_FIXED_INTVEC_BASE() + __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base); +#endif #if CHIP_HAS_TILE_RTF_HWM() - /* - * Clear this whenever we switch back to a process in case - * the previous process was monkeying with it. Even if enabled - * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a - * performance hint, so isn't worth a full save/restore. 
- */ - __insn_mtspr(SPR_TILE_RTF_HWM, 0); + __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm); +#endif +#if CHIP_HAS_DSTREAM_PF() + __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf); #endif } @@ -561,8 +570,9 @@ out: } #ifdef CONFIG_COMPAT -long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, - compat_uptr_t __user *envp, struct pt_regs *regs) +long _compat_sys_execve(const char __user *path, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp, struct pt_regs *regs) { long error; char *filename; @@ -657,7 +667,7 @@ void show_regs(struct pt_regs *regs) regs->regs[51], regs->regs[52], regs->tp); pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); #else - for (i = 0; i < 52; i += 3) + for (i = 0; i < 52; i += 4) pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", i, regs->regs[i], i+1, regs->regs[i+1], diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index 45b66a3c991..ce183aa1492 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c @@ -61,13 +61,19 @@ int restore_sigcontext(struct pt_regs *regs, /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; + /* + * Enforce that sigcontext is like pt_regs, and doesn't mess + * up our stack alignment rules. + */ + BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs)); + BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0); + for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) - err |= __get_user(((long *)regs)[i], - &((long __user *)(&sc->regs))[i]); + err |= __get_user(regs->regs[i], &sc->gregs[i]); regs->faultnum = INT_SWINT_1_SIGRETURN; - err |= __get_user(*pr0, &sc->regs.regs[0]); + err |= __get_user(*pr0, &sc->gregs[0]); return err; } @@ -112,8 +118,7 @@ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) int i, err = 0; for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) - err |= __put_user(((long *)regs)[i], - &((long __user *)(&sc->regs))[i]); + err |= __put_user(regs->regs[i], &sc->gregs[i]); return err; } @@ -203,19 +208,17 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * Set up registers for signal handler. * Registers that we don't modify keep the value they had from * user-space at the time we took the signal. + * We always pass siginfo and mcontext, regardless of SA_SIGINFO, + * since some things rely on this (e.g. glibc's debug/segfault.c). */ regs->pc = (unsigned long) ka->sa.sa_handler; regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ regs->sp = (unsigned long) frame; regs->lr = restorer; regs->regs[0] = (unsigned long) usig; - - if (ka->sa.sa_flags & SA_SIGINFO) { - /* Need extra arguments, so mark to restore caller-saves. */ - regs->regs[1] = (unsigned long) &frame->info; - regs->regs[2] = (unsigned long) &frame->uc; - regs->flags |= PT_FLAGS_CALLER_SAVES; - } + regs->regs[1] = (unsigned long) &frame->info; + regs->regs[2] = (unsigned long) &frame->uc; + regs->flags |= PT_FLAGS_CALLER_SAVES; /* * Notify any tracer that was single-stepping it. 
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index 38a68b0b458..ea2e0ce2838 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -175,7 +175,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt) pr_err(" <received signal %d>\n", frame->info.si_signo); } - return &frame->uc.uc_mcontext.regs; + return (struct pt_regs *)&frame->uc.uc_mcontext; } return NULL; } diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index b86feabed69..518bb99c339 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -50,7 +50,12 @@ /* * Reload arg registers from stack in case ptrace changed them. * We don't reload %eax because syscall_trace_enter() returned - * the value it wants us to use in the table lookup. + * the %rax value we should see. Instead, we just truncate that + * value to 32 bits again as we did on entry from user mode. + * If it's a new value set by user_regset during entry tracing, + * this matches the normal truncation of the user-mode value. + * If it's -1 to make us punt the syscall, then (u32)-1 is still + * an appropriately invalid value. */ .macro LOAD_ARGS32 offset, _r9=0 .if \_r9 @@ -60,6 +65,7 @@ movl \offset+48(%rsp),%edx movl \offset+56(%rsp),%esi movl \offset+64(%rsp),%edi + movl %eax,%eax /* zero extension */ .endm .macro CFI_STARTPROC32 simple @@ -153,7 +159,7 @@ ENTRY(ia32_sysenter_target) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) CFI_REMEMBER_STATE jnz sysenter_tracesys - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja ia32_badsys sysenter_do_call: IA32_ARG_FIXUP @@ -195,7 +201,7 @@ sysexit_from_sys_call: movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ call audit_syscall_entry movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja ia32_badsys movl %ebx,%edi /* reload 1st syscall arg */ movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ @@ -248,7 +254,7 @@ sysenter_tracesys: call syscall_trace_enter LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ RESTORE_REST - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ jmp sysenter_do_call CFI_ENDPROC @@ -314,7 +320,7 @@ ENTRY(ia32_cstar_target) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) CFI_REMEMBER_STATE jnz cstar_tracesys - cmpl $IA32_NR_syscalls-1,%eax + cmpq $IA32_NR_syscalls-1,%rax ja ia32_badsys cstar_do_call: IA32_ARG_FIXUP 1 @@ -367,7 +373,7 @@ cstar_tracesys: LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ RESTORE_REST xchgl %ebp,%r9d - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ jmp cstar_do_call END(ia32_cstar_target) @@ -425,7 +431,7 @@ ENTRY(ia32_syscall) orl $TS_COMPAT,TI_status(%r10) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) jnz ia32_tracesys - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja ia32_badsys ia32_do_call: IA32_ARG_FIXUP @@ -444,7 +450,7 @@ ia32_tracesys: call syscall_trace_enter LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ RESTORE_REST - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ jmp ia32_do_call END(ia32_syscall) diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index 306160e58b4..1d9cd27c292 100644 --- 
a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static inline void __user *compat_alloc_user_space(long len) +static inline void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = task_pt_regs(current); return (void __user *)regs->sp - len; diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 51cfd730ac5..1f99ecfc48e 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -152,9 +152,14 @@ struct x86_emulate_ops { struct operand { enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; unsigned int bytes; - unsigned long orig_val, *ptr; + union { + unsigned long orig_val; + u64 orig_val64; + }; + unsigned long *ptr; union { unsigned long val; + u64 val64; char valptr[sizeof(unsigned long) + 2]; }; }; diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index d632934cb63..26a863a9c2a 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -655,7 +655,7 @@ void restore_sched_clock_state(void) local_irq_save(flags); - get_cpu_var(cyc2ns_offset) = 0; + __get_cpu_var(cyc2ns_offset) = 0; offset = cyc2ns_suspend - sched_clock(); for_each_possible_cpu(cpu) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b38bd8b92aa..66ca98aafdd 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1870,17 +1870,16 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) { struct decode_cache *c = &ctxt->decode; - u64 old = c->dst.orig_val; + u64 old = c->dst.orig_val64; if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) { - c->regs[VCPU_REGS_RAX] = (u32) (old >> 0); c->regs[VCPU_REGS_RDX] = (u32) (old >> 32); ctxt->eflags &= ~EFLG_ZF; } else { - c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) | - (u32) c->regs[VCPU_REGS_RBX]; + c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) | + (u32) c->regs[VCPU_REGS_RBX]; ctxt->eflags |= EFLG_ZF; } @@ -2616,7 +2615,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) c->src.valptr, c->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; - c->src.orig_val = c->src.val; + c->src.orig_val64 = c->src.val64; } if (c->src2.type == OP_MEM) { diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 8d10c063d7f..4b7b73ce209 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -64,6 +64,9 @@ static void pic_unlock(struct kvm_pic *s) if (!found) found = s->kvm->bsp_vcpu; + if (!found) + return; + kvm_vcpu_kick(found); } } diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index ffed06871c5..63c31450299 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -43,7 +43,6 @@ struct kvm_kpic_state { u8 irr; /* interrupt request register */ u8 imr; /* interrupt mask register */ u8 isr; /* interrupt service register */ - u8 isr_ack; /* interrupt ack detection */ u8 priority_add; /* highest irq priority */ u8 irq_base; u8 read_reg_select; @@ -56,6 +55,7 @@ struct kvm_kpic_state { u8 init4; /* true if 4 byte init */ u8 elcr; /* PIIX edge/trigger selection */ u8 elcr_mask; + u8 isr_ack; /* interrupt ack detection */ struct kvm_pic *pics_state; }; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index a6809645d21..2fef1ef931a 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup) /* 
Currently we do not support hierarchy deeper than two level (0,1) */ if (parent != cgroup->top_cgroup) - return ERR_PTR(-EINVAL); + return ERR_PTR(-EPERM); blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); if (!blkcg) diff --git a/block/blk-core.c b/block/blk-core.c index ee1a1e7e63c..32a1c123dfb 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio) int el_ret; unsigned int bytes = bio->bi_size; const unsigned short prio = bio_prio(bio); - const bool sync = (bio->bi_rw & REQ_SYNC); - const bool unplug = (bio->bi_rw & REQ_UNPLUG); - const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; + const bool sync = !!(bio->bi_rw & REQ_SYNC); + const bool unplug = !!(bio->bi_rw & REQ_UNPLUG); + const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK; int rw_flags; if ((bio->bi_rw & REQ_HARDBARRIER) && diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 001ab18078f..0749b89c688 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk) kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_del(&q->kobj); blk_trace_remove_sysfs(disk_to_dev(disk)); + kobject_put(&dev->kobj); return ret; } diff --git a/block/blk.h b/block/blk.h index 6e7dc87141e..d6b911ac002 100644 --- a/block/blk.h +++ b/block/blk.h @@ -142,14 +142,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q) static inline int blk_cpu_to_group(int cpu) { + int group = NR_CPUS; #ifdef CONFIG_SCHED_MC const struct cpumask *mask = cpu_coregroup_mask(cpu); - return cpumask_first(mask); + group = cpumask_first(mask); #elif defined(CONFIG_SCHED_SMT) - return cpumask_first(topology_thread_cpumask(cpu)); + group = cpumask_first(topology_thread_cpumask(cpu)); #else return cpu; #endif + if (likely(group < NR_CPUS)) + return group; + return cpu; } /* diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index eb4086f7dfe..f65c6f01c47 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10; static int cfq_slice_async = HZ / 25; static const int cfq_slice_async_rq = 2; static int cfq_slice_idle = HZ / 125; +static int cfq_group_idle = HZ / 125; static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ static const int cfq_hist_divisor = 4; @@ -147,6 +148,8 @@ struct cfq_queue { struct cfq_queue *new_cfqq; struct cfq_group *cfqg; struct cfq_group *orig_cfqg; + /* Number of sectors dispatched from queue in single dispatch round */ + unsigned long nr_sectors; }; /* @@ -198,6 +201,8 @@ struct cfq_group { struct hlist_node cfqd_node; atomic_t ref; #endif + /* number of requests that are on the dispatch list or inside driver */ + int dispatched; }; /* @@ -271,6 +276,7 @@ struct cfq_data { unsigned int cfq_slice[2]; unsigned int cfq_slice_async_rq; unsigned int cfq_slice_idle; + unsigned int cfq_group_idle; unsigned int cfq_latency; unsigned int cfq_group_isolation; @@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy); &cfqg->service_trees[i][j]: NULL) \ +static inline bool iops_mode(struct cfq_data *cfqd) +{ + /* + * If we are not idling on queues and it is a NCQ drive, parallel + * execution of requests is on and measuring time is not possible + * in most of the cases until and unless we drive shallower queue + * depths and that becomes a performance bottleneck. In such cases + * switch to start providing fairness in terms of number of IOs. 
+ */ + if (!cfqd->cfq_slice_idle && cfqd->hw_tag) + return true; + else + return false; +} + static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) { if (cfq_class_idle(cfqq)) @@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq) slice_used = cfqq->allocated_slice; } - cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used); return slice_used; } @@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, struct cfq_queue *cfqq) { struct cfq_rb_root *st = &cfqd->grp_service_tree; - unsigned int used_sl, charge_sl; + unsigned int used_sl, charge; int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) - cfqg->service_tree_idle.count; BUG_ON(nr_sync < 0); - used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq); + used_sl = charge = cfq_cfqq_slice_usage(cfqq); - if (!cfq_cfqq_sync(cfqq) && !nr_sync) - charge_sl = cfqq->allocated_slice; + if (iops_mode(cfqd)) + charge = cfqq->slice_dispatch; + else if (!cfq_cfqq_sync(cfqq) && !nr_sync) + charge = cfqq->allocated_slice; /* Can't update vdisktime while group is on service tree */ cfq_rb_erase(&cfqg->rb_node, st); - cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg); + cfqg->vdisktime += cfq_scale_slice(charge, cfqg); __cfq_group_service_tree_add(st, cfqg); /* This group is being expired. Save the context */ @@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, st->min_vdisktime); + cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u" + " sect=%u", used_sl, cfqq->slice_dispatch, charge, + iops_mode(cfqd), cfqq->nr_sectors); cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); cfq_blkiocg_set_start_empty_time(&cfqg->blkg); } @@ -1587,6 +1612,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, cfqq->allocated_slice = 0; cfqq->slice_end = 0; cfqq->slice_dispatch = 0; + cfqq->nr_sectors = 0; cfq_clear_cfqq_wait_request(cfqq); cfq_clear_cfqq_must_dispatch(cfqq); @@ -1839,6 +1865,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) BUG_ON(!service_tree); BUG_ON(!service_tree->count); + if (!cfqd->cfq_slice_idle) + return false; + /* We never do for idle class queues. */ if (prio == IDLE_WORKLOAD) return false; @@ -1863,7 +1892,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) { struct cfq_queue *cfqq = cfqd->active_queue; struct cfq_io_context *cic; - unsigned long sl; + unsigned long sl, group_idle = 0; /* * SSD device without seek penalty, disable idling. But only do so @@ -1879,8 +1908,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) /* * idle is disabled, either manually or by past process history */ - if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq)) - return; + if (!cfq_should_idle(cfqd, cfqq)) { + /* no queue idling. 
Check for group idling */ + if (cfqd->cfq_group_idle) + group_idle = cfqd->cfq_group_idle; + else + return; + } /* * still active requests from this queue, don't idle @@ -1907,13 +1941,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) return; } + /* There are other queues in the group, don't do group idle */ + if (group_idle && cfqq->cfqg->nr_cfqq > 1) + return; + cfq_mark_cfqq_wait_request(cfqq); - sl = cfqd->cfq_slice_idle; + if (group_idle) + sl = cfqd->cfq_group_idle; + else + sl = cfqd->cfq_slice_idle; mod_timer(&cfqd->idle_slice_timer, jiffies + sl); cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg); - cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl); + cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, + group_idle ? 1 : 0); } /* @@ -1929,9 +1971,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); cfq_remove_request(rq); cfqq->dispatched++; + (RQ_CFQG(rq))->dispatched++; elv_dispatch_sort(q, rq); cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; + cfqq->nr_sectors += blk_rq_sectors(rq); cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq), rq_data_dir(rq), rq_is_sync(rq)); } @@ -2198,7 +2242,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) cfqq = NULL; goto keep_queue; } else - goto expire; + goto check_group_idle; } /* @@ -2226,8 +2270,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) * flight or is idling for a new request, allow either of these * conditions to happen (or time out) before selecting a new queue. */ - if (timer_pending(&cfqd->idle_slice_timer) || - (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) { + if (timer_pending(&cfqd->idle_slice_timer)) { + cfqq = NULL; + goto keep_queue; + } + + if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { + cfqq = NULL; + goto keep_queue; + } + + /* + * If group idle is enabled and there are requests dispatched from + * this group, wait for requests to complete. + */ +check_group_idle: + if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 + && cfqq->cfqg->dispatched) { cfqq = NULL; goto keep_queue; } @@ -3375,6 +3434,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) WARN_ON(!cfqq->dispatched); cfqd->rq_in_driver--; cfqq->dispatched--; + (RQ_CFQG(rq))->dispatched--; cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq), rq_io_start_time_ns(rq), rq_data_dir(rq), rq_is_sync(rq)); @@ -3404,7 +3464,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) * the queue. 
*/ if (cfq_should_wait_busy(cfqd, cfqq)) { - cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; + unsigned long extend_sl = cfqd->cfq_slice_idle; + if (!cfqd->cfq_slice_idle) + extend_sl = cfqd->cfq_group_idle; + cfqq->slice_end = jiffies + extend_sl; cfq_mark_cfqq_wait_busy(cfqq); cfq_log_cfqq(cfqd, cfqq, "will busy wait"); } @@ -3850,6 +3913,7 @@ static void *cfq_init_queue(struct request_queue *q) cfqd->cfq_slice[1] = cfq_slice_sync; cfqd->cfq_slice_async_rq = cfq_slice_async_rq; cfqd->cfq_slice_idle = cfq_slice_idle; + cfqd->cfq_group_idle = cfq_group_idle; cfqd->cfq_latency = 1; cfqd->cfq_group_isolation = 0; cfqd->hw_tag = -1; @@ -3922,6 +3986,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); +SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1); SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); @@ -3954,6 +4019,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); +STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, @@ -3975,6 +4041,7 @@ static struct elv_fs_entry cfq_attrs[] = { CFQ_ATTR(slice_async), CFQ_ATTR(slice_async_rq), CFQ_ATTR(slice_idle), + CFQ_ATTR(group_idle), CFQ_ATTR(low_latency), CFQ_ATTR(group_isolation), __ATTR_NULL @@ -4028,6 +4095,12 @@ static int __init cfq_init(void) if (!cfq_slice_idle) cfq_slice_idle = 1; +#ifdef CONFIG_CFQ_GROUP_IOSCHED + if (!cfq_group_idle) + cfq_group_idle = 1; +#else + cfq_group_idle = 0; +#endif if (cfq_slab_setup()) return -ENOMEM; diff --git a/block/elevator.c b/block/elevator.c index ec585c9554d..205b09a5bd9 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -1009,18 +1009,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) { struct elevator_queue *old_elevator, *e; void *data; + int err; /* * Allocate new elevator */ e = elevator_alloc(q, new_e); if (!e) - return 0; + return -ENOMEM; data = elevator_init_queue(q, e); if (!data) { kobject_put(&e->kobj); - return 0; + return -ENOMEM; } /* @@ -1043,7 +1044,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) __elv_unregister_queue(old_elevator); - if (elv_register_queue(q)) + err = elv_register_queue(q); + if (err) goto fail_register; /* @@ -1056,7 +1058,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); - return 1; + return 0; fail_register: /* @@ -1071,17 +1073,19 @@ fail_register: queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); spin_unlock_irq(q->queue_lock); - return 0; + return err; } -ssize_t elv_iosched_store(struct request_queue *q, const char *name, - size_t count) +/* + * Switch this queue to the given IO scheduler. 
+ */ +int elevator_change(struct request_queue *q, const char *name) { char elevator_name[ELV_NAME_MAX]; struct elevator_type *e; if (!q->elevator) - return count; + return -ENXIO; strlcpy(elevator_name, name, sizeof(elevator_name)); e = elevator_get(strstrip(elevator_name)); @@ -1092,13 +1096,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name, if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { elevator_put(e); - return count; + return 0; } - if (!elevator_switch(q, e)) - printk(KERN_ERR "elevator: switch to %s failed\n", - elevator_name); - return count; + return elevator_switch(q, e); +} +EXPORT_SYMBOL(elevator_change); + +ssize_t elv_iosched_store(struct request_queue *q, const char *name, + size_t count) +{ + int ret; + + if (!q->elevator) + return count; + + ret = elevator_change(q, name); + if (!ret) + return count; + + printk(KERN_ERR "elevator: switch to %s failed\n", name); + return ret; } ssize_t elv_iosched_show(struct request_queue *q, char *name) diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 5419a49ff13..276d5a701dc 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -59,6 +59,7 @@ void device_pm_init(struct device *dev) { dev->power.status = DPM_ON; init_completion(&dev->power.completion); + complete_all(&dev->power.completion); dev->power.wakeup_count = 0; pm_runtime_init(dev); } diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 31064df1370..6124c2fd2d3 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -297,6 +297,8 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h, spin_lock_irqsave(&h->lock, flags); addQ(&h->reqQ, c); h->Qdepth++; + if (h->Qdepth > h->maxQsinceinit) + h->maxQsinceinit = h->Qdepth; start_io(h); spin_unlock_irqrestore(&h->lock, flags); } @@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) misc_fw_support = readl(&cfgtable->misc_fw_support); use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; + /* The doorbell reset seems to cause lockups on some Smart + * Arrays (e.g. P410, P410i, maybe others). Until this is + * fixed or at least isolated, avoid the doorbell reset. 
+ */ + use_doorbell = 0; + rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) goto unmap_cfgtable; @@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, h->scatter_list = kmalloc(h->max_commands * sizeof(struct scatterlist *), GFP_KERNEL); + if (!h->scatter_list) + goto clean4; + for (k = 0; k < h->nr_cmds; k++) { h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * h->maxsgentries, diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f3c636d2371..91797bbbe70 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; if (bio_rw(bio) == WRITE) { - bool barrier = (bio->bi_rw & REQ_HARDBARRIER); + bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER); struct file *file = lo->lo_backing_file; if (barrier) { diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index b82c5ce5e9d..76fa3deaee8 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev) host->breq->queuedata = host; /* mflash is random device, thanx for the noop */ - elevator_exit(host->breq->elevator); - err = elevator_init(host->breq, "noop"); + err = elevator_change(host->breq, "noop"); if (err) { printk(KERN_ERR "%s:%d (elevator_init) fail\n", __func__, __LINE__); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index d2ab01e90a9..dcbeb98f195 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -103,8 +103,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, if (connector->funcs->force) connector->funcs->force(connector); } else { - connector->status = connector->funcs->detect(connector); - drm_helper_hpd_irq_event(dev); + connector->status = connector->funcs->detect(connector, true); + drm_kms_helper_poll_enable(dev); } if (connector->status == connector_status_disconnected) { @@ -637,13 +637,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) mode_changed = true; if (mode_changed) { - old_fb = set->crtc->fb; - set->crtc->fb = set->fb; set->crtc->enabled = (set->mode != NULL); if (set->mode != NULL) { DRM_DEBUG_KMS("attempting to set mode from" " userspace\n"); drm_mode_debug_printmodeline(set->mode); + old_fb = set->crtc->fb; + set->crtc->fb = set->fb; if (!drm_crtc_helper_set_mode(set->crtc, set->mode, set->x, set->y, old_fb)) { @@ -866,7 +866,7 @@ static void output_poll_execute(struct work_struct *work) !(connector->polled & DRM_CONNECTOR_POLL_HPD)) continue; - status = connector->funcs->detect(connector); + status = connector->funcs->detect(connector, false); if (old_status != status) changed = true; } diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index e20f78b542a..f5bd9e590c8 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -164,6 +164,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, dev->hose = pdev->sysdata; #endif + mutex_lock(&drm_global_mutex); + if ((ret = drm_fill_in_dev(dev, ent, driver))) { printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); goto err_g2; @@ -199,6 +201,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, driver->name, driver->major, driver->minor, driver->patchlevel, driver->date, pci_name(pdev), dev->primary->index); + mutex_unlock(&drm_global_mutex); return 0; err_g4: @@ -210,6 +213,7 @@ err_g2: 
pci_disable_device(pdev); err_g1: kfree(dev); + mutex_unlock(&drm_global_mutex); return ret; } EXPORT_SYMBOL(drm_get_pci_dev); diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index 460e9a3afa8..92d1d0fb7b7 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c @@ -53,6 +53,8 @@ int drm_get_platform_dev(struct platform_device *platdev, dev->platformdev = platdev; dev->dev = &platdev->dev; + mutex_lock(&drm_global_mutex); + ret = drm_fill_in_dev(dev, NULL, driver); if (ret) { @@ -87,6 +89,8 @@ int drm_get_platform_dev(struct platform_device *platdev, list_add_tail(&dev->driver_item, &driver->device_list); + mutex_unlock(&drm_global_mutex); + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, driver->major, driver->minor, driver->patchlevel, driver->date, dev->primary->index); @@ -100,6 +104,7 @@ err_g2: drm_put_minor(&dev->control); err_g1: kfree(dev); + mutex_unlock(&drm_global_mutex); return ret; } EXPORT_SYMBOL(drm_get_platform_dev); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 86118a74223..85da4c40694 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -159,7 +159,7 @@ static ssize_t status_show(struct device *device, struct drm_connector *connector = to_drm_connector(device); enum drm_connector_status status; - status = connector->funcs->detect(connector); + status = connector->funcs->detect(connector, true); return snprintf(buf, PAGE_SIZE, "%s\n", drm_get_connector_status_name(status)); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 59457e83b01..744225ebb4b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1350,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data) i915_seqno_passed(i915_get_gem_seqno(dev, &dev_priv->render_ring), i915_get_tail_request(dev)->seqno)) { + bool missed_wakeup = false; + dev_priv->hangcheck_count = 0; /* Issue a wake-up to catch stuck h/w. */ - if (dev_priv->render_ring.waiting_gem_seqno | - dev_priv->bsd_ring.waiting_gem_seqno) { - DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); - if (dev_priv->render_ring.waiting_gem_seqno) - DRM_WAKEUP(&dev_priv->render_ring.irq_queue); - if (dev_priv->bsd_ring.waiting_gem_seqno) - DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + if (dev_priv->render_ring.waiting_gem_seqno && + waitqueue_active(&dev_priv->render_ring.irq_queue)) { + DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + missed_wakeup = true; + } + + if (dev_priv->bsd_ring.waiting_gem_seqno && + waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { + DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + missed_wakeup = true; } + + if (missed_wakeup) + DRM_ERROR("Hangcheck timer elapsed... 
GPU idle, missed IRQ.\n"); return; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d094e912922..4f5e15577e8 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2206,9 +2206,17 @@ #define WM1_LP_SR_EN (1<<31) #define WM1_LP_LATENCY_SHIFT 24 #define WM1_LP_LATENCY_MASK (0x7f<<24) +#define WM1_LP_FBC_LP1_MASK (0xf<<20) +#define WM1_LP_FBC_LP1_SHIFT 20 #define WM1_LP_SR_MASK (0x1ff<<8) #define WM1_LP_SR_SHIFT 8 #define WM1_LP_CURSOR_MASK (0x3f) +#define WM2_LP_ILK 0x4510c +#define WM2_LP_EN (1<<31) +#define WM3_LP_ILK 0x45110 +#define WM3_LP_EN (1<<31) +#define WM1S_LP_ILK 0x45120 +#define WM1S_LP_EN (1<<31) /* Memory latency timer register */ #define MLTR_ILK 0x11222 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 4b7735196cd..a02a8df7372 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -400,7 +400,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder return status; } -static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) +static enum drm_connector_status +intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; struct drm_encoder *encoder = intel_attached_encoder(connector); @@ -419,6 +420,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto if (intel_crt_detect_ddc(encoder)) return connector_status_connected; + if (!force) + return connector->status; + /* for pre-945g platforms use load detect */ if (encoder->crtc && encoder->crtc->enabled) { status = intel_crt_load_detect(encoder->crtc, intel_encoder); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 40cc5da264a..19daead5b52 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2767,14 +2767,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, /* Don't promote wm_size to unsigned... */ if (wm_size > (long)wm->max_wm) wm_size = wm->max_wm; - if (wm_size <= 0) { + if (wm_size <= 0) wm_size = wm->default_wm; - DRM_ERROR("Insufficient FIFO for plane, expect flickering:" - " entries required = %ld, available = %lu.\n", - entries_required + wm->guard_size, - wm->fifo_size); - } - return wm_size; } @@ -3388,8 +3382,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, reg_value = I915_READ(WM1_LP_ILK); reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | WM1_LP_CURSOR_MASK); - reg_value |= WM1_LP_SR_EN | - (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | + reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; I915_WRITE(WM1_LP_ILK, reg_value); @@ -5675,6 +5668,9 @@ void intel_init_clock_gating(struct drm_device *dev) I915_WRITE(DISP_ARB_CTL, (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); } /* * Based on the document from hardware guys the following bits @@ -5696,8 +5692,7 @@ void intel_init_clock_gating(struct drm_device *dev) ILK_DPFC_DIS2 | ILK_CLK_FBC); } - if (IS_GEN6(dev)) - return; + return; } else if (IS_G4X(dev)) { uint32_t dspclk_gate; I915_WRITE(RENCLK_GATE_D1, 0); @@ -5758,11 +5753,9 @@ void intel_init_clock_gating(struct drm_device *dev) OUT_RING(MI_FLUSH); ADVANCE_LP_RING(); } - } else { + } else DRM_DEBUG_KMS("Failed to allocate render context." 
- "Disable RC6\n"); - return; - } + "Disable RC6\n"); } if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 51d142939a2..1a51ee07de3 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1386,7 +1386,7 @@ ironlake_dp_detect(struct drm_connector *connector) * \return false if DP port is disconnected. */ static enum drm_connector_status -intel_dp_detect(struct drm_connector *connector) +intel_dp_detect(struct drm_connector *connector, bool force) { struct drm_encoder *encoder = intel_attached_encoder(connector); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index a399f4b2c1c..7c9ec1472d4 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -221,7 +221,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, * * Unimplemented. */ -static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) +static enum drm_connector_status +intel_dvo_detect(struct drm_connector *connector, bool force) { struct drm_encoder *encoder = intel_attached_encoder(connector); struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ccd4c97e652..926934a482e 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -139,7 +139,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, } static enum drm_connector_status -intel_hdmi_detect(struct drm_connector *connector) +intel_hdmi_detect(struct drm_connector *connector, bool force) { struct drm_encoder *encoder = intel_attached_encoder(connector); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b819c108114..6ec39a86ed0 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -445,7 +445,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, * connected and closed means disconnected. We also send hotplug events as * needed, using lid status notification from the input layer. */ -static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) +static enum drm_connector_status +intel_lvds_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; enum drm_connector_status status = connector_status_connected; @@ -540,7 +541,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, * the LID nofication event. 
*/ if (connector) - connector->status = connector->funcs->detect(connector); + connector->status = connector->funcs->detect(connector, + false); + /* Don't force modeset on machines where it causes a GPU lockup */ if (dmi_check_system(intel_no_modeset_on_lid)) return NOTIFY_OK; @@ -875,8 +878,6 @@ void intel_lvds_init(struct drm_device *dev) intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); intel_encoder->crtc_mask = (1 << 1); - if (IS_I965G(dev)) - intel_encoder->crtc_mask |= (1 << 0); drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e3b7a7ee39c..e8e902d614e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1417,7 +1417,7 @@ intel_analog_is_connected(struct drm_device *dev) if (!analog_connector) return false; - if (analog_connector->funcs->detect(analog_connector) == + if (analog_connector->funcs->detect(analog_connector, false) == connector_status_disconnected) return false; @@ -1486,7 +1486,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) return status; } -static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) +static enum drm_connector_status +intel_sdvo_detect(struct drm_connector *connector, bool force) { uint16_t response; struct drm_encoder *encoder = intel_attached_encoder(connector); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index c671f60ce80..4a117e318a7 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1341,7 +1341,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector) * we have a pipe programmed in order to probe the TV. 
*/ static enum drm_connector_status -intel_tv_detect(struct drm_connector *connector) +intel_tv_detect(struct drm_connector *connector, bool force) { struct drm_display_mode mode; struct drm_encoder *encoder = intel_attached_encoder(connector); @@ -1353,7 +1353,7 @@ intel_tv_detect(struct drm_connector *connector) if (encoder->crtc && encoder->crtc->enabled) { type = intel_tv_detect_type(intel_tv); - } else { + } else if (force) { struct drm_crtc *crtc; int dpms_mode; @@ -1364,10 +1364,9 @@ intel_tv_detect(struct drm_connector *connector) intel_release_load_detect_pipe(&intel_tv->base, connector, dpms_mode); } else - type = -1; - } - - intel_tv->type = type; + return connector_status_unknown; + } else + return connector->status; if (type < 0) return connector_status_disconnected; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index a1473fff06a..87186a4bbf0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -168,7 +168,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, } static enum drm_connector_status -nouveau_connector_detect(struct drm_connector *connector) +nouveau_connector_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; struct nouveau_connector *nv_connector = nouveau_connector(connector); @@ -246,7 +246,7 @@ detect_analog: } static enum drm_connector_status -nouveau_connector_detect_lvds(struct drm_connector *connector) +nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -267,7 +267,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector) /* Try retrieving EDID via DDC */ if (!dev_priv->vbios.fp_no_ddc) { - status = nouveau_connector_detect(connector); + status = nouveau_connector_detect(connector, force); if (status == connector_status_connected) goto out; } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 464a81a1990..cd0290f946c 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -539,14 +539,15 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, pll->algo = PLL_ALGO_LEGACY; pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; } - /* There is some evidence (often anecdotal) that RV515 LVDS + /* There is some evidence (often anecdotal) that RV515/RV620 LVDS * (on some boards at least) prefers the legacy algo. I'm not * sure whether this should handled generically or on a * case-by-case quirk basis. Both algos should work fine in the * majority of cases. 
*/ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && - (rdev->family == CHIP_RV515)) { + ((rdev->family == CHIP_RV515) || + (rdev->family == CHIP_RV620))) { /* allow the user to overrride just in case */ if (radeon_new_pll == 1) pll->algo = PLL_ALGO_NEW; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index b8b7f010b25..79082d4398a 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1160,14 +1160,25 @@ static void evergreen_gpu_init(struct radeon_device *rdev) EVERGREEN_MAX_BACKENDS_MASK)); break; } - } else - gb_backend_map = - evergreen_get_tile_pipe_to_backend_map(rdev, - rdev->config.evergreen.max_tile_pipes, - rdev->config.evergreen.max_backends, - ((EVERGREEN_MAX_BACKENDS_MASK << - rdev->config.evergreen.max_backends) & - EVERGREEN_MAX_BACKENDS_MASK)); + } else { + switch (rdev->family) { + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + gb_backend_map = 0x66442200; + break; + case CHIP_JUNIPER: + gb_backend_map = 0x00006420; + break; + default: + gb_backend_map = + evergreen_get_tile_pipe_to_backend_map(rdev, + rdev->config.evergreen.max_tile_pipes, + rdev->config.evergreen.max_backends, + ((EVERGREEN_MAX_BACKENDS_MASK << + rdev->config.evergreen.max_backends) & + EVERGREEN_MAX_BACKENDS_MASK)); + } + } rdev->config.evergreen.tile_config = gb_addr_config; WREG32(GB_BACKEND_MAP, gb_backend_map); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e817a0bb5eb..e151f16a8f8 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -2020,18 +2020,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l return false; } elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); - if (elapsed >= 3000) { - /* very likely the improbable case where current - * rptr is equal to last recorded, a while ago, rptr - * this is more likely a false positive update tracking - * information which should force us to be recall at - * latter point - */ - lockup->last_cp_rptr = cp->rptr; - lockup->last_jiffies = jiffies; - return false; - } - if (elapsed >= 1000) { + if (elapsed >= 10000) { dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); return true; } @@ -3308,13 +3297,14 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) unsigned long size; unsigned prim_walk; unsigned nverts; + unsigned num_cb = track->num_cb; - for (i = 0; i < track->num_cb; i++) { + if (!track->zb_cb_clear && !track->color_channel_mask && + !track->blend_read_enable) + num_cb = 0; + + for (i = 0; i < num_cb; i++) { if (track->cb[i].robj == NULL) { - if (!(track->zb_cb_clear || track->color_channel_mask || - track->blend_read_enable)) { - continue; - } DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index d13622ae74e..9ceb2a1ce79 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -1,3 +1,28 @@ +/* + * Copyright 2009 Advanced Micro Devices, Inc. + * Copyright 2009 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + */ + #include "drmP.h" #include "drm.h" #include "radeon_drm.h" diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h index fdc3b378cbb..f437d36dd98 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.h +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h @@ -1,3 +1,27 @@ +/* + * Copyright 2009 Advanced Micro Devices, Inc. + * Copyright 2009 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + */ #ifndef R600_BLIT_SHADERS_H #define R600_BLIT_SHADERS_H diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index d8864949e38..250a3a91819 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -1170,9 +1170,8 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i /* using get ib will give us the offset into the mipmap bo */ word0 = radeon_get_ib_value(p, idx + 3) << 8; if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { - dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", - w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture)); - return -EINVAL; + /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", + w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/ } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index bd74e428bd1..a04b7a6ad95 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -1485,6 +1485,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) /* PowerMac8,1 ? */ /* imac g5 isight */ rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; + } else if ((rdev->pdev->device == 0x4a48) && + (rdev->pdev->subsystem_vendor == 0x1002) && + (rdev->pdev->subsystem_device == 0x4a48)) { + /* Mac X800 */ + rdev->mode_info.connector_table = CT_MAC_X800; } else #endif /* CONFIG_PPC_PMAC */ #ifdef CONFIG_PPC64 @@ -1961,6 +1966,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) CONNECTOR_OBJECT_ID_VGA, &hpd); break; + case CT_MAC_X800: + DRM_INFO("Connector Table: %d (mac x800)\n", + rdev->mode_info.connector_table); + /* DVI - primary dac, internal tmds */ + ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); + hpd.hpd = RADEON_HPD_1; /* ??? */ + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_DFP1_SUPPORT, + 0), + ATOM_DEVICE_DFP1_SUPPORT); + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_CRT1_SUPPORT, + 1), + ATOM_DEVICE_CRT1_SUPPORT); + radeon_add_legacy_connector(dev, 0, + ATOM_DEVICE_DFP1_SUPPORT | + ATOM_DEVICE_CRT1_SUPPORT, + DRM_MODE_CONNECTOR_DVII, &ddc_i2c, + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, + &hpd); + /* DVI - tv dac, dvo */ + ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); + hpd.hpd = RADEON_HPD_2; /* ??? 
*/ + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_DFP2_SUPPORT, + 0), + ATOM_DEVICE_DFP2_SUPPORT); + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_CRT2_SUPPORT, + 2), + ATOM_DEVICE_CRT2_SUPPORT); + radeon_add_legacy_connector(dev, 1, + ATOM_DEVICE_DFP2_SUPPORT | + ATOM_DEVICE_CRT2_SUPPORT, + DRM_MODE_CONNECTOR_DVII, &ddc_i2c, + CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, + &hpd); + break; default: DRM_INFO("Connector table: %d (invalid)\n", rdev->mode_info.connector_table); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index a9dd7847d96..ecc1a8fafbf 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -481,7 +481,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector, return MODE_OK; } -static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector) +static enum drm_connector_status +radeon_lvds_detect(struct drm_connector *connector, bool force) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = radeon_best_single_encoder(connector); @@ -594,7 +595,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector, return MODE_OK; } -static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector) +static enum drm_connector_status +radeon_vga_detect(struct drm_connector *connector, bool force) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; @@ -691,7 +693,8 @@ static int radeon_tv_mode_valid(struct drm_connector *connector, return MODE_OK; } -static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector) +static enum drm_connector_status +radeon_tv_detect(struct drm_connector *connector, bool force) { struct drm_encoder *encoder; struct drm_encoder_helper_funcs *encoder_funcs; @@ -748,7 +751,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector) * we have to check if this analog encoder is shared with anyone else (TV) * if its shared we have to set the other connector to disconnected. 
*/ -static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector) +static enum drm_connector_status +radeon_dvi_detect(struct drm_connector *connector, bool force) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = NULL; @@ -972,7 +976,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector) return ret; } -static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector) +static enum drm_connector_status +radeon_dp_detect(struct drm_connector *connector, bool force) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); enum drm_connector_status ret = connector_status_disconnected; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6dd434ad242..127a395f70f 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1140,17 +1140,18 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, radeon_crtc->rmx_type = radeon_encoder->rmx_type; else radeon_crtc->rmx_type = RMX_OFF; - src_v = crtc->mode.vdisplay; - dst_v = radeon_crtc->native_mode.vdisplay; - src_h = crtc->mode.hdisplay; - dst_h = radeon_crtc->native_mode.vdisplay; /* copy native mode */ memcpy(&radeon_crtc->native_mode, &radeon_encoder->native_mode, sizeof(struct drm_display_mode)); + src_v = crtc->mode.vdisplay; + dst_v = radeon_crtc->native_mode.vdisplay; + src_h = crtc->mode.hdisplay; + dst_h = radeon_crtc->native_mode.hdisplay; /* fix up for overscan on hdmi */ if (ASIC_IS_AVIVO(rdev) && + (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && ((radeon_encoder->underscan_type == UNDERSCAN_ON) || ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && drm_detect_hdmi_monitor(radeon_connector->edid) && diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index efbe975312d..17a6602b588 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -204,7 +204,7 @@ struct radeon_i2c_chan { /* mostly for macs, but really any system without connector tables */ enum radeon_connector_table { - CT_NONE, + CT_NONE = 0, CT_GENERIC, CT_IBOOK, CT_POWERBOOK_EXTERNAL, @@ -215,6 +215,7 @@ enum radeon_connector_table { CT_IMAC_G5_ISIGHT, CT_EMAC, CT_RN50_POWER, + CT_MAC_X800, }; enum radeon_dvo_chip { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 2ff5cf78235..7083b1a24df 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -335,7 +335,8 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector) } static enum drm_connector_status - vmw_ldu_connector_detect(struct drm_connector *connector) + vmw_ldu_connector_detect(struct drm_connector *connector, + bool force) { if (vmw_connector_to_ldu(connector)->pref_active) return connector_status_connected; @@ -516,7 +517,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - connector->status = vmw_ldu_connector_detect(connector); + connector->status = vmw_ldu_connector_detect(connector, true); drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, DRM_MODE_ENCODER_LVDS); @@ -610,7 +611,7 @@ int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, ldu->pref_height = 600; ldu->pref_active = false; } - con->status = vmw_ldu_connector_detect(con); + con->status = vmw_ldu_connector_detect(con, 
true); } mutex_unlock(&dev->mode_config.mutex); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 0c52899be96..3f729248602 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1285,8 +1285,11 @@ static const struct hid_device_id hid_blacklist[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) }, { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, @@ -1578,7 +1581,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, - { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)}, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 85c6d13c9ff..765a4f53eb5 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -105,6 +105,7 @@ #define USB_VENDOR_ID_ASUS 0x0486 #define USB_DEVICE_ID_ASUS_T91MT 0x0185 +#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186 #define USB_VENDOR_ID_ASUSTEK 0x0b05 #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 @@ -128,6 +129,7 @@ #define USB_VENDOR_ID_BTC 0x046e #define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578 +#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577 #define USB_VENDOR_ID_CANDO 0x2087 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 @@ -149,6 +151,7 @@ #define USB_VENDOR_ID_CHICONY 0x04f2 #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 +#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d #define USB_VENDOR_ID_CIDC 0x1677 @@ -507,6 +510,7 @@ #define USB_VENDOR_ID_UCLOGIC 0x5543 #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 +#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001 #define USB_VENDOR_ID_VERNIER 0x08f7 #define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c index e91437c1890..ac5421d568f 100644 --- a/drivers/hid/hid-mosart.c +++ b/drivers/hid/hid-mosart.c @@ -239,6 +239,7 @@ static void mosart_remove(struct hid_device *hdev) static const struct hid_device_id mosart_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) }, { } }; MODULE_DEVICE_TABLE(hid, mosart_devices); diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c index 5771f851f85..956ed9ac19d 100644 --- a/drivers/hid/hid-topseed.c +++ b/drivers/hid/hid-topseed.c @@ -64,6 +64,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi, static const struct 
hid_device_id ts_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, { } }; diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index b729c028667..599041a7f67 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -828,6 +828,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co } } else { int skipped_report_id = 0; + int report_id = buf[0]; if (buf[0] == 0x0) { /* Don't send the Report ID */ buf++; @@ -837,7 +838,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), HID_REQ_SET_REPORT, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - ((report_type + 1) << 8) | *buf, + ((report_type + 1) << 8) | report_id, interface->desc.bInterfaceNumber, buf, count, USB_CTRL_SET_TIMEOUT); /* count also the report id, if this was a numbered report. */ @@ -1445,6 +1446,11 @@ static const struct hid_device_id hid_usb_table[] = { { } }; +struct usb_interface *usbhid_find_interface(int minor) +{ + return usb_find_interface(&hid_driver, minor); +} + static struct hid_driver hid_usb_driver = { .name = "generic-usb", .id_table = hid_usb_table, diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 2643d314762..70da3181c8a 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -33,6 +33,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, + { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET }, { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, @@ -69,6 +70,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, @@ -77,6 +79,8 @@ static const struct hid_blacklist { { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE }, + { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT }, + { 0, 0 } }; diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 0a29c51114a..681e620eb95 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -270,7 +270,7 @@ static int hiddev_open(struct inode *inode, struct file *file) struct hiddev *hiddev; int res; - intf = usb_find_interface(&hiddev_driver, iminor(inode)); + intf = 
usbhid_find_interface(iminor(inode)); if (!intf) return -ENODEV; hid = usb_get_intfdata(intf); diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h index 693fd3e720d..89d2e847dcc 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h @@ -42,6 +42,7 @@ void usbhid_submit_report (struct hid_device *hid, struct hid_report *report, unsigned char dir); int usbhid_get_power(struct hid_device *hid); void usbhid_put_power(struct hid_device *hid); +struct usb_interface *usbhid_find_interface(int minor); /* iofl flags */ #define HID_CTRL_RUNNING 1 diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index a382e3dd0a5..6fbeefa3a76 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c @@ -682,7 +682,6 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info) static int __devexit bf5xx_nand_remove(struct platform_device *pdev) { struct bf5xx_nand_info *info = to_nand_info(pdev); - struct mtd_info *mtd = NULL; platform_set_drvdata(pdev, NULL); @@ -690,11 +689,7 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev) * and their partitions, then go through freeing the * resources used */ - mtd = &info->mtd; - if (mtd) { - nand_release(mtd); - kfree(mtd); - } + nand_release(&info->mtd); peripheral_free_list(bfin_nfc_pin_req); bf5xx_nand_dma_remove(info); @@ -710,7 +705,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd) struct nand_chip *chip = mtd->priv; int ret; - ret = nand_scan_ident(mtd, 1); + ret = nand_scan_ident(mtd, 1, NULL); if (ret) return ret; diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index fcf8ceb277d..b2828e84d24 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -67,7 +67,9 @@ #define NFC_V1_V2_CONFIG1_BIG (1 << 5) #define NFC_V1_V2_CONFIG1_RST (1 << 6) #define NFC_V1_V2_CONFIG1_CE (1 << 7) -#define NFC_V1_V2_CONFIG1_ONE_CYCLE (1 << 8) +#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8) +#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9) +#define NFC_V2_CONFIG1_FP_INT (1 << 11) #define NFC_V1_V2_CONFIG2_INT (1 << 15) @@ -402,16 +404,16 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host) /* Wait for operation to complete */ wait_op_done(host, true); + memcpy(host->data_buf, host->main_area0, 16); + if (this->options & NAND_BUSWIDTH_16) { - void __iomem *main_buf = host->main_area0; /* compress the ID info */ - writeb(readb(main_buf + 2), main_buf + 1); - writeb(readb(main_buf + 4), main_buf + 2); - writeb(readb(main_buf + 6), main_buf + 3); - writeb(readb(main_buf + 8), main_buf + 4); - writeb(readb(main_buf + 10), main_buf + 5); + host->data_buf[1] = host->data_buf[2]; + host->data_buf[2] = host->data_buf[4]; + host->data_buf[3] = host->data_buf[6]; + host->data_buf[4] = host->data_buf[8]; + host->data_buf[5] = host->data_buf[10]; } - memcpy(host->data_buf, host->main_area0, 16); } static uint16_t get_dev_status_v3(struct mxc_nand_host *host) @@ -729,27 +731,30 @@ static void preset_v1_v2(struct mtd_info *mtd) { struct nand_chip *nand_chip = mtd->priv; struct mxc_nand_host *host = nand_chip->priv; - uint16_t tmp; - - /* enable interrupt, disable spare enable */ - tmp = readw(NFC_V1_V2_CONFIG1); - tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK; - tmp &= ~NFC_V1_V2_CONFIG1_SP_EN; - if (nand_chip->ecc.mode == NAND_ECC_HW) { - tmp |= NFC_V1_V2_CONFIG1_ECC_EN; - } else { - tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN; - } + uint16_t config1 = 0; + + if (nand_chip->ecc.mode == NAND_ECC_HW) + config1 |= NFC_V1_V2_CONFIG1_ECC_EN; + + if 
(nfc_is_v21()) + config1 |= NFC_V2_CONFIG1_FP_INT; + + if (!cpu_is_mx21()) + config1 |= NFC_V1_V2_CONFIG1_INT_MSK; if (nfc_is_v21() && mtd->writesize) { + uint16_t pages_per_block = mtd->erasesize / mtd->writesize; + host->eccsize = get_eccsize(mtd); if (host->eccsize == 4) - tmp |= NFC_V2_CONFIG1_ECC_MODE_4; + config1 |= NFC_V2_CONFIG1_ECC_MODE_4; + + config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6); } else { host->eccsize = 1; } - writew(tmp, NFC_V1_V2_CONFIG1); + writew(config1, NFC_V1_V2_CONFIG1); /* preset operation */ /* Unlock the internal RAM Buffer */ diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 4d89f378020..4d01cda6884 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -1320,6 +1320,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) goto fail_free_irq; } +#ifdef CONFIG_MTD_PARTITIONS if (mtd_has_cmdlinepart()) { static const char *probes[] = { "cmdlinepart", NULL }; struct mtd_partition *parts; @@ -1332,6 +1333,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) } return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); +#else + return 0; +#endif fail_free_irq: free_irq(irq, info); @@ -1364,7 +1368,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); del_mtd_device(mtd); +#ifdef CONFIG_MTD_PARTITIONS del_mtd_partitions(mtd); +#endif irq = platform_get_irq(pdev, 0); if (irq >= 0) free_irq(irq, info); diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c index cb443af3d45..a460f1b748c 100644 --- a/drivers/mtd/onenand/samsung.c +++ b/drivers/mtd/onenand/samsung.c @@ -554,14 +554,13 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction) do { status = readl(base + S5PC110_DMA_TRANS_STATUS); + if (status & S5PC110_DMA_TRANS_STATUS_TE) { + writel(S5PC110_DMA_TRANS_CMD_TEC, + base + S5PC110_DMA_TRANS_CMD); + return -EIO; + } } while (!(status & S5PC110_DMA_TRANS_STATUS_TD)); - if (status & S5PC110_DMA_TRANS_STATUS_TE) { - writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD); - writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); - return -EIO; - } - writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); return 0; @@ -571,13 +570,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, unsigned char *buffer, int offset, size_t count) { struct onenand_chip *this = mtd->priv; - void __iomem *bufferram; void __iomem *p; void *buf = (void *) buffer; dma_addr_t dma_src, dma_dst; int err; - p = bufferram = this->base + area; + p = this->base + area; if (ONENAND_CURRENT_BUFFERRAM(this)) { if (area == ONENAND_DATARAM) p += this->writesize; @@ -621,7 +619,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, normal: if (count != mtd->writesize) { /* Copy the bufferram to memory to prevent unaligned access */ - memcpy(this->page_buf, bufferram, mtd->writesize); + memcpy(this->page_buf, p, mtd->writesize); p = this->page_buf + offset; } diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index a045559c81c..85671adae45 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -1994,10 +1994,9 @@ vortex_error(struct net_device *dev, int status) } } - if (status & RxEarly) { /* Rx early is unused. */ - vortex_rx(dev); + if (status & RxEarly) /* Rx early is unused. */ iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); - } + if (status & StatsFull) { /* Empty statistics. 
*/ static int DoneDidThat; if (vortex_debug > 4) @@ -2298,7 +2297,12 @@ vortex_interrupt(int irq, void *dev_id) if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { if (status == 0xffff) break; + if (status & RxEarly) + vortex_rx(dev); + spin_unlock(&vp->window_lock); vortex_error(dev, status); + spin_lock(&vp->window_lock); + window_set(vp, 7); } if (--work_done < 0) { diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 37617abc164..1e620e287ae 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget) b44_tx(bp); /* spin_unlock(&bp->tx_lock); */ } + if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */ + bp->istat &= ~ISTAT_RFO; + b44_disable_ints(bp); + ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */ + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); + netif_wake_queue(bp->dev); + } + spin_unlock_irqrestore(&bp->lock, flags); work_done = 0; diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 99197bd54da..53306bf3f40 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -181,6 +181,7 @@ struct be_drvr_stats { u64 be_rx_bytes_prev; u64 be_rx_pkts; u32 be_rx_rate; + u32 be_rx_mcast_pkt; /* number of non ether type II frames dropped where * frame len > length field of Mac Hdr */ u32 be_802_3_dropped_frames; diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 3d305494a60..34abcc9403d 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status) while ((compl = be_mcc_compl_get(adapter))) { if (compl->flags & CQE_FLAGS_ASYNC_MASK) { /* Interpret flags as an async trailer */ - BUG_ON(!is_link_state_evt(compl->flags)); - - /* Interpret compl as a async link evt */ - be_async_link_state_process(adapter, + if (is_link_state_evt(compl->flags)) + be_async_link_state_process(adapter, (struct be_async_event_link_state *) compl); } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { *status = be_mcc_compl_process(adapter, compl); @@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) if (msecs > 4000) { dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); - be_dump_ue(adapter); + be_detect_dump_ue(adapter); return -1; } diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index bdc10a28cfd..ad1e6fac60c 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h @@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, extern int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd); extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); -extern void be_dump_ue(struct be_adapter *adapter); +extern void be_detect_dump_ue(struct be_adapter *adapter); diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index cd16243c7c3..13f0abbc520 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c @@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = { {DRVSTAT_INFO(be_rx_events)}, {DRVSTAT_INFO(be_tx_compl)}, {DRVSTAT_INFO(be_rx_compl)}, + {DRVSTAT_INFO(be_rx_mcast_pkt)}, {DRVSTAT_INFO(be_ethrx_post_fail)}, {DRVSTAT_INFO(be_802_3_dropped_frames)}, {DRVSTAT_INFO(be_802_3_malformed_frames)}, diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h index 5d38046402b..a2ec5df0d73 100644 --- a/drivers/net/benet/be_hw.h +++ 
b/drivers/net/benet/be_hw.h @@ -167,8 +167,11 @@ #define FLASH_FCoE_BIOS_START_g3 (13631488) #define FLASH_REDBOOT_START_g3 (262144) - - +/************* Rx Packet Type Encoding **************/ +#define BE_UNICAST_PACKET 0 +#define BE_MULTICAST_PACKET 1 +#define BE_BROADCAST_PACKET 2 +#define BE_RSVD_PACKET 3 /* * BE descriptors: host memory data structures whose formats diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 74e146f470c..6eda7a02225 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter) dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; + dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt; /* bad pkts received */ dev_stats->rx_errors = port_stats->rx_crc_errors + @@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter) /* no space available in linux */ dev_stats->tx_dropped = 0; - dev_stats->multicast = port_stats->rx_multicast_frames; dev_stats->collisions = 0; /* detailed tx_errors */ @@ -848,7 +848,7 @@ static void be_rx_rate_update(struct be_adapter *adapter) } static void be_rx_stats_update(struct be_adapter *adapter, - u32 pktsize, u16 numfrags) + u32 pktsize, u16 numfrags, u8 pkt_type) { struct be_drvr_stats *stats = drvr_stats(adapter); @@ -856,6 +856,9 @@ static void be_rx_stats_update(struct be_adapter *adapter, stats->be_rx_frags += numfrags; stats->be_rx_bytes += pktsize; stats->be_rx_pkts++; + + if (pkt_type == BE_MULTICAST_PACKET) + stats->be_rx_mcast_pkt++; } static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) @@ -925,9 +928,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter, u16 rxq_idx, i, j; u32 pktsize, hdr_len, curr_frag_len, size; u8 *start; + u8 pkt_type; rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); + pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); page_info = get_rx_page_info(adapter, rxq_idx); @@ -993,7 +998,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, BUG_ON(j > MAX_SKB_FRAGS); done: - be_rx_stats_update(adapter, pktsize, num_rcvd); + be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type); } /* Process the RX completion indicated by rxcp when GRO is disabled */ @@ -1060,6 +1065,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; u16 i, rxq_idx = 0, vid, j; u8 vtm; + u8 pkt_type; num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); /* Is it a flush compl that has no data */ @@ -1070,6 +1076,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); + pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); /* vlanf could be wrongly set in some cards. 
* ignore if vtm is not set */ @@ -1125,7 +1132,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); } - be_rx_stats_update(adapter, pkt_size, num_rcvd); + be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type); } static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) @@ -1743,26 +1750,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) return 1; } -static inline bool be_detect_ue(struct be_adapter *adapter) -{ - u32 online0 = 0, online1 = 0; - - pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0); - - pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1); - - if (!online0 || !online1) { - adapter->ue_detected = true; - dev_err(&adapter->pdev->dev, - "UE Detected!! online0=%d online1=%d\n", - online0, online1); - return true; - } - - return false; -} - -void be_dump_ue(struct be_adapter *adapter) +void be_detect_dump_ue(struct be_adapter *adapter) { u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; u32 i; @@ -1779,6 +1767,11 @@ void be_dump_ue(struct be_adapter *adapter) ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); + if (ue_status_lo || ue_status_hi) { + adapter->ue_detected = true; + dev_err(&adapter->pdev->dev, "UE Detected!!\n"); + } + if (ue_status_lo) { for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { if (ue_status_lo & 1) @@ -1814,10 +1807,8 @@ static void be_worker(struct work_struct *work) adapter->rx_post_starved = false; be_post_rx_frags(adapter); } - if (!adapter->ue_detected) { - if (be_detect_ue(adapter)) - be_dump_ue(adapter); - } + if (!adapter->ue_detected) + be_detect_dump_ue(adapter); schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2cc4cfc3189..3b16f62d560 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work) * so it can wait */ bond_for_each_slave(bond, slave, i) { + unsigned long trans_start = dev_trans_start(slave->dev); + if (slave->link != BOND_LINK_UP) { - if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) && - time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) { + if (time_in_range(jiffies, + trans_start - delta_in_ticks, + trans_start + delta_in_ticks) && + time_in_range(jiffies, + slave->dev->last_rx - delta_in_ticks, + slave->dev->last_rx + delta_in_ticks)) { slave->link = BOND_LINK_UP; slave->state = BOND_STATE_ACTIVE; @@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work) * when the source ip is 0, so don't take the link down * if we don't know our ip yet */ - if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) || - (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) { + if (!time_in_range(jiffies, + trans_start - delta_in_ticks, + trans_start + 2 * delta_in_ticks) || + !time_in_range(jiffies, + slave->dev->last_rx - delta_in_ticks, + slave->dev->last_rx + 2 * delta_in_ticks)) { slave->link = BOND_LINK_DOWN; slave->state = BOND_STATE_BACKUP; @@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) { struct slave *slave; int i, commit = 0; + unsigned long trans_start; bond_for_each_slave(bond, slave, i) { slave->new_link = BOND_LINK_NOCHANGE; if (slave->link != BOND_LINK_UP) { - if (time_before_eq(jiffies, 
slave_last_rx(bond, slave) + - delta_in_ticks)) { + if (time_in_range(jiffies, + slave_last_rx(bond, slave) - delta_in_ticks, + slave_last_rx(bond, slave) + delta_in_ticks)) { + slave->new_link = BOND_LINK_UP; commit++; } @@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) * active. This avoids bouncing, as the last receive * times need a full ARP monitor cycle to be updated. */ - if (!time_after_eq(jiffies, slave->jiffies + - 2 * delta_in_ticks)) + if (time_in_range(jiffies, + slave->jiffies - delta_in_ticks, + slave->jiffies + 2 * delta_in_ticks)) continue; /* @@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) */ if (slave->state == BOND_STATE_BACKUP && !bond->current_arp_slave && - time_after(jiffies, slave_last_rx(bond, slave) + - 3 * delta_in_ticks)) { + !time_in_range(jiffies, + slave_last_rx(bond, slave) - delta_in_ticks, + slave_last_rx(bond, slave) + 3 * delta_in_ticks)) { + slave->new_link = BOND_LINK_DOWN; commit++; } @@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) * - (more than 2*delta since receive AND * the bond has an IP address) */ + trans_start = dev_trans_start(slave->dev); if ((slave->state == BOND_STATE_ACTIVE) && - (time_after_eq(jiffies, dev_trans_start(slave->dev) + - 2 * delta_in_ticks) || - (time_after_eq(jiffies, slave_last_rx(bond, slave) - + 2 * delta_in_ticks)))) { + (!time_in_range(jiffies, + trans_start - delta_in_ticks, + trans_start + 2 * delta_in_ticks) || + !time_in_range(jiffies, + slave_last_rx(bond, slave) - delta_in_ticks, + slave_last_rx(bond, slave) + 2 * delta_in_ticks))) { + slave->new_link = BOND_LINK_DOWN; commit++; } @@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) { struct slave *slave; int i; + unsigned long trans_start; bond_for_each_slave(bond, slave, i) { switch (slave->new_link) { @@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) continue; case BOND_LINK_UP: + trans_start = dev_trans_start(slave->dev); if ((!bond->curr_active_slave && - time_before_eq(jiffies, - dev_trans_start(slave->dev) + - delta_in_ticks)) || + time_in_range(jiffies, + trans_start - delta_in_ticks, + trans_start + delta_in_ticks)) || bond->curr_active_slave != slave) { slave->link = BOND_LINK_UP; bond->current_arp_slave = NULL; diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index b4fb07a6f13..51919fcd50c 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c @@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); - if (rxlen > 0) { - skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8); - if (!skb) { - /* todo - dump frame and move on */ - } + if (rxlen > 4) { + unsigned int rxalign; + + rxlen -= 4; + rxalign = ALIGN(rxlen, 4); + skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign); + if (skb) { - /* two bytes to ensure ip is aligned, and four bytes - * for the status header and 4 bytes of garbage */ - skb_reserve(skb, 2 + 4 + 4); + /* 4 bytes of status header + 4 bytes of + * garbage: we put them before ethernet + * header, so that they are copied, + * but ignored. 
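
/* Illustrative aside on the bonding hunks above, not part of the patch: the
 * open-coded time_before_eq()/time_after_eq() tests are replaced with
 * time_in_range(), so a stamp that is extremely old (or that appears to sit
 * in the future after the counter wraps) is no longer treated as recent. A
 * standalone model of the signed-difference comparison the kernel's jiffies
 * helpers use; the my_* names are local to this sketch:
 */
#include <limits.h>
#include <stdio.h>

#define my_time_after_eq(a, b)   ((long)((a) - (b)) >= 0)
#define my_time_before_eq(a, b)  my_time_after_eq((b), (a))
#define my_time_in_range(t, lo, hi) \
        (my_time_after_eq((t), (lo)) && my_time_before_eq((t), (hi)))

int main(void)
{
        unsigned long delta = 100;
        unsigned long jiffies = 123456;
        /* stamp taken slightly more than half the counter range ago */
        unsigned long trans_start = jiffies - (ULONG_MAX / 2 + 1 + 1000);

        /* the one-sided test wrongly reports this ancient stamp as recent */
        printf("one-sided: %d\n",
               my_time_before_eq(jiffies, trans_start + delta));
        /* the windowed test does not */
        printf("windowed : %d\n",
               my_time_in_range(jiffies, trans_start - delta,
                                trans_start + delta));
        return 0;
}
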
+ */ - rxpkt = skb_put(skb, rxlen - 4) - 8; + rxpkt = skb_put(skb, rxlen) - 8; - /* align the packet length to 4 bytes, and add 4 bytes - * as we're getting the rx status header as well */ - ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8); + ks8851_rdfifo(ks, rxpkt, rxalign + 8); - if (netif_msg_pktdata(ks)) - ks8851_dbg_dumpkkt(ks, rxpkt); + if (netif_msg_pktdata(ks)) + ks8851_dbg_dumpkkt(ks, rxpkt); - skb->protocol = eth_type_trans(skb, ks->netdev); - netif_rx(skb); + skb->protocol = eth_type_trans(skb, ks->netdev); + netif_rx(skb); - ks->netdev->stats.rx_packets++; - ks->netdev->stats.rx_bytes += rxlen - 4; + ks->netdev->stats.rx_packets++; + ks->netdev->stats.rx_bytes += rxlen; + } } ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index bdf2149e529..87f0a93b165 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c @@ -38,6 +38,7 @@ #include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> +#include <linux/of_address.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/tcp.h> /* needed for sizeof(tcphdr) */ diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c index 5ae28c975b3..8cf9d4f56bb 100644 --- a/drivers/net/ll_temac_mdio.c +++ b/drivers/net/ll_temac_mdio.c @@ -10,6 +10,7 @@ #include <linux/phy.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_address.h> #include <linux/slab.h> #include <linux/of_mdio.h> diff --git a/drivers/net/niu.c b/drivers/net/niu.c index bc695d53cdc..fe6983af691 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -7269,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np, struct niu_parent *parent = np->parent; struct niu_tcam_entry *tp; int i, idx, cnt; - u16 n_entries; unsigned long flags; - + int ret = 0; /* put the tcam size here */ nfc->data = tcam_get_size(np); niu_lock_parent(np, flags); - n_entries = nfc->rule_cnt; for (cnt = 0, i = 0; i < nfc->data; i++) { idx = tcam_get_index(np, i); tp = &parent->tcam[idx]; if (!tp->valid) continue; + if (cnt == nfc->rule_cnt) { + ret = -EMSGSIZE; + break; + } rule_locs[cnt] = i; cnt++; } niu_unlock_parent(np, flags); - if (n_entries != cnt) { - /* print warning, this should not happen */ - netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n", - np->parent->index, __func__, n_entries, cnt); - } - - return 0; + return ret; } static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 49279b0ee52..f9b509a6b09 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -508,7 +508,8 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev, unsigned int vcc, void *priv_data) { - int *has_shmem = priv_data; + int *priv = priv_data; + int try = (*priv & 0x1); int i; cistpl_io_t *io = &cfg->io; @@ -525,77 +526,103 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev, i = p_dev->resource[1]->end = 0; } - *has_shmem = ((cfg->mem.nwin == 1) && - (cfg->mem.win[0].len >= 0x4000)); + *priv &= ((cfg->mem.nwin == 1) && + (cfg->mem.win[0].len >= 0x4000)) ? 
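
/* Illustrative aside on the ks8851 hunk above, not part of the patch: the
 * reported frame length is trimmed by its 4 trailing bytes first, rounded up
 * to a multiple of 4 for the FIFO read, and 8 extra bytes (the status header
 * plus garbage, per the comment in the hunk) are read in front of the
 * Ethernet header. A standalone check of that arithmetic, using a local
 * round-up macro in place of the kernel's ALIGN():
 */
#include <assert.h>

#define MY_ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int rxlen = 67;        /* length reported by the chip */
        unsigned int rxalign;

        rxlen -= 4;                     /* drop the trailing 4 bytes */
        rxalign = MY_ALIGN(rxlen, 4);

        assert(rxalign == 64);          /* 63 rounded up to a multiple of 4 */
        assert(rxalign % 4 == 0);
        assert(rxalign >= rxlen);       /* rounding never truncates the frame */
        return 0;
}
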
0x10 : ~0x10; + p_dev->resource[0]->start = io->win[i].base; p_dev->resource[0]->end = io->win[i].len; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; + if (!try) + p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; + else + p_dev->io_lines = 16; if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32) return try_io_port(p_dev); - return 0; + return -EINVAL; +} + +static hw_info_t *pcnet_try_config(struct pcmcia_device *link, + int *has_shmem, int try) +{ + struct net_device *dev = link->priv; + hw_info_t *local_hw_info; + pcnet_dev_t *info = PRIV(dev); + int priv = try; + int ret; + + ret = pcmcia_loop_config(link, pcnet_confcheck, &priv); + if (ret) { + dev_warn(&link->dev, "no useable port range found\n"); + return NULL; + } + *has_shmem = (priv & 0x10); + + if (!link->irq) + return NULL; + + if (resource_size(link->resource[1]) == 8) { + link->conf.Attributes |= CONF_ENABLE_SPKR; + link->conf.Status = CCSR_AUDIO_ENA; + } + if ((link->manf_id == MANFID_IBM) && + (link->card_id == PRODID_IBM_HOME_AND_AWAY)) + link->conf.ConfigIndex |= 0x10; + + ret = pcmcia_request_configuration(link, &link->conf); + if (ret) + return NULL; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + + if (info->flags & HAS_MISC_REG) { + if ((if_port == 1) || (if_port == 2)) + dev->if_port = if_port; + else + dev_notice(&link->dev, "invalid if_port requested\n"); + } else + dev->if_port = 0; + + if ((link->conf.ConfigBase == 0x03c0) && + (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) { + dev_info(&link->dev, + "this is an AX88190 card - use axnet_cs instead.\n"); + return NULL; + } + + local_hw_info = get_hwinfo(link); + if (!local_hw_info) + local_hw_info = get_prom(link); + if (!local_hw_info) + local_hw_info = get_dl10019(link); + if (!local_hw_info) + local_hw_info = get_ax88190(link); + if (!local_hw_info) + local_hw_info = get_hwired(link); + + return local_hw_info; } static int pcnet_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; pcnet_dev_t *info = PRIV(dev); - int ret, start_pg, stop_pg, cm_offset; + int start_pg, stop_pg, cm_offset; int has_shmem = 0; hw_info_t *local_hw_info; dev_dbg(&link->dev, "pcnet_config\n"); - ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem); - if (ret) - goto failed; - - if (!link->irq) - goto failed; - - if (resource_size(link->resource[1]) == 8) { - link->conf.Attributes |= CONF_ENABLE_SPKR; - link->conf.Status = CCSR_AUDIO_ENA; - } - if ((link->manf_id == MANFID_IBM) && - (link->card_id == PRODID_IBM_HOME_AND_AWAY)) - link->conf.ConfigIndex |= 0x10; - - ret = pcmcia_request_configuration(link, &link->conf); - if (ret) - goto failed; - dev->irq = link->irq; - dev->base_addr = link->resource[0]->start; - if (info->flags & HAS_MISC_REG) { - if ((if_port == 1) || (if_port == 2)) - dev->if_port = if_port; - else - printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n"); - } else { - dev->if_port = 0; - } - - if ((link->conf.ConfigBase == 0x03c0) && - (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) { - printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n"); - printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n"); - goto failed; - } - - local_hw_info = get_hwinfo(link); - if (local_hw_info == NULL) - local_hw_info = get_prom(link); - if (local_hw_info == NULL) - local_hw_info = get_dl10019(link); - if (local_hw_info == NULL) - local_hw_info = get_ax88190(link); - if (local_hw_info == NULL) - local_hw_info = get_hwired(link); - - if (local_hw_info == NULL) { - printk(KERN_NOTICE "pcnet_cs: 
unable to read hardware net" - " address for io base %#3lx\n", dev->base_addr); - goto failed; + local_hw_info = pcnet_try_config(link, &has_shmem, 0); + if (!local_hw_info) { + /* check whether forcing io_lines to 16 helps... */ + pcmcia_disable_device(link); + local_hw_info = pcnet_try_config(link, &has_shmem, 1); + if (local_hw_info == NULL) { + dev_notice(&link->dev, "unable to read hardware net" + " address for io base %#3lx\n", dev->base_addr); + goto failed; + } } info->flags = local_hw_info->flags; diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index bbb7951b9c4..ea0461eb2db 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c @@ -1865,15 +1865,15 @@ static int stmmac_resume(struct platform_device *pdev) if (!netif_running(dev)) return 0; - spin_lock(&priv->lock); - if (priv->shutdown) { /* Re-open the interface and re-init the MAC/DMA - and the rings. */ + and the rings (i.e. on hibernation stage) */ stmmac_open(dev); - goto out_resume; + return 0; } + spin_lock(&priv->lock); + /* Power Down bit, into the PM register, is cleared * automatically as soon as a magic packet or a Wake-up frame * is received. Anyway, it's better to manually clear @@ -1901,7 +1901,6 @@ static int stmmac_resume(struct platform_device *pdev) netif_start_queue(dev); -out_resume: spin_unlock(&priv->lock); return 0; } diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 8ed30fa35d0..b2bcf99e6f0 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -429,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = { .ndo_get_stats = &ipheth_stats, }; -static struct device_type ipheth_type = { - .name = "wwan", -}; - static int ipheth_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -450,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf, netdev->netdev_ops = &ipheth_netdev_ops; netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; - strcpy(netdev->name, "wwan%d"); + strcpy(netdev->name, "eth%d"); dev = netdev_priv(netdev); dev->udev = udev; @@ -500,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf, SET_NETDEV_DEV(netdev, &intf->dev); SET_ETHTOOL_OPS(netdev, &ops); - SET_NETDEV_DEVTYPE(netdev, &ipheth_type); retval = register_netdev(netdev); if (retval) { diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index fd69095ef6e..f53412368ce 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | - NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG; + NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; ret = register_netdev(dev); if (ret < 0) diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index 54aa1c238cb..a5c176598d9 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c @@ -163,7 +163,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev, c = p_dev->function_config; if (!(c->state & CONFIG_LOCKED)) { - dev_dbg(&s->dev, "Configuration isnt't locked\n"); + dev_dbg(&p_dev->dev, "Configuration isnt't locked\n"); mutex_unlock(&s->ops_mutex); return -EACCES; } @@ -220,7 +220,7 @@ int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh, s->win[w].card_start = offset; ret = s->ops->set_mem_map(s, &s->win[w]); if (ret) - dev_warn(&s->dev, 
"failed to set_mem_map\n"); + dev_warn(&p_dev->dev, "failed to set_mem_map\n"); mutex_unlock(&s->ops_mutex); return ret; } /* pcmcia_map_mem_page */ @@ -244,18 +244,18 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, c = p_dev->function_config; if (!(s->state & SOCKET_PRESENT)) { - dev_dbg(&s->dev, "No card present\n"); + dev_dbg(&p_dev->dev, "No card present\n"); ret = -ENODEV; goto unlock; } if (!(c->state & CONFIG_LOCKED)) { - dev_dbg(&s->dev, "Configuration isnt't locked\n"); + dev_dbg(&p_dev->dev, "Configuration isnt't locked\n"); ret = -EACCES; goto unlock; } if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) { - dev_dbg(&s->dev, + dev_dbg(&p_dev->dev, "changing Vcc or IRQ is not allowed at this time\n"); ret = -EINVAL; goto unlock; @@ -265,20 +265,22 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { if (mod->Vpp1 != mod->Vpp2) { - dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n"); + dev_dbg(&p_dev->dev, + "Vpp1 and Vpp2 must be the same\n"); ret = -EINVAL; goto unlock; } s->socket.Vpp = mod->Vpp1; if (s->ops->set_socket(s, &s->socket)) { - dev_printk(KERN_WARNING, &s->dev, + dev_printk(KERN_WARNING, &p_dev->dev, "Unable to set VPP\n"); ret = -EIO; goto unlock; } } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { - dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); + dev_dbg(&p_dev->dev, + "changing Vcc is not allowed at this time\n"); ret = -EINVAL; goto unlock; } @@ -401,7 +403,7 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res) win = &s->win[w]; if (!(p_dev->_win & CLIENT_WIN_REQ(w))) { - dev_dbg(&s->dev, "not releasing unknown window\n"); + dev_dbg(&p_dev->dev, "not releasing unknown window\n"); mutex_unlock(&s->ops_mutex); return -EINVAL; } @@ -439,7 +441,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev, return -ENODEV; if (req->IntType & INT_CARDBUS) { - dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n"); + dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n"); return -EINVAL; } @@ -447,7 +449,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev, c = p_dev->function_config; if (c->state & CONFIG_LOCKED) { mutex_unlock(&s->ops_mutex); - dev_dbg(&s->dev, "Configuration is locked\n"); + dev_dbg(&p_dev->dev, "Configuration is locked\n"); return -EACCES; } @@ -455,7 +457,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev, s->socket.Vpp = req->Vpp; if (s->ops->set_socket(s, &s->socket)) { mutex_unlock(&s->ops_mutex); - dev_printk(KERN_WARNING, &s->dev, + dev_printk(KERN_WARNING, &p_dev->dev, "Unable to set socket state\n"); return -EINVAL; } @@ -569,19 +571,20 @@ int pcmcia_request_io(struct pcmcia_device *p_dev) int ret = -EINVAL; mutex_lock(&s->ops_mutex); - dev_dbg(&s->dev, "pcmcia_request_io: %pR , %pR", &c->io[0], &c->io[1]); + dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR", + &c->io[0], &c->io[1]); if (!(s->state & SOCKET_PRESENT)) { - dev_dbg(&s->dev, "pcmcia_request_io: No card present\n"); + dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n"); goto out; } if (c->state & CONFIG_LOCKED) { - dev_dbg(&s->dev, "Configuration is locked\n"); + dev_dbg(&p_dev->dev, "Configuration is locked\n"); goto out; } if (c->state & CONFIG_IO_REQ) { - dev_dbg(&s->dev, "IO already configured\n"); + dev_dbg(&p_dev->dev, "IO already configured\n"); goto out; } @@ -601,7 +604,7 @@ int 
pcmcia_request_io(struct pcmcia_device *p_dev) c->state |= CONFIG_IO_REQ; p_dev->_io = 1; - dev_dbg(&s->dev, "pcmcia_request_io succeeded: %pR , %pR", + dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR", &c->io[0], &c->io[1]); out: mutex_unlock(&s->ops_mutex); @@ -800,7 +803,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha int w; if (!(s->state & SOCKET_PRESENT)) { - dev_dbg(&s->dev, "No card present\n"); + dev_dbg(&p_dev->dev, "No card present\n"); return -ENODEV; } @@ -809,12 +812,12 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha req->Size = s->map_size; align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size; if (req->Size & (s->map_size-1)) { - dev_dbg(&s->dev, "invalid map size\n"); + dev_dbg(&p_dev->dev, "invalid map size\n"); return -EINVAL; } if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || (req->Base & (align-1))) { - dev_dbg(&s->dev, "invalid base address\n"); + dev_dbg(&p_dev->dev, "invalid base address\n"); return -EINVAL; } if (req->Base) @@ -826,7 +829,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha if (!(s->state & SOCKET_WIN_REQ(w))) break; if (w == MAX_WIN) { - dev_dbg(&s->dev, "all windows are used already\n"); + dev_dbg(&p_dev->dev, "all windows are used already\n"); mutex_unlock(&s->ops_mutex); return -EINVAL; } @@ -837,7 +840,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha win->res = pcmcia_find_mem_region(req->Base, req->Size, align, 0, s); if (!win->res) { - dev_dbg(&s->dev, "allocating mem region failed\n"); + dev_dbg(&p_dev->dev, "allocating mem region failed\n"); mutex_unlock(&s->ops_mutex); return -EINVAL; } @@ -851,7 +854,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha win->card_start = 0; if (s->ops->set_mem_map(s, win) != 0) { - dev_dbg(&s->dev, "failed to set memory mapping\n"); + dev_dbg(&p_dev->dev, "failed to set memory mapping\n"); mutex_unlock(&s->ops_mutex); return -EIO; } @@ -874,7 +877,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha if (win->res) request_resource(&iomem_resource, res); - dev_dbg(&s->dev, "request_window results in %pR\n", res); + dev_dbg(&p_dev->dev, "request_window results in %pR\n", res); mutex_unlock(&s->ops_mutex); *wh = res; diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c index 936bae560fa..dc628cb2e76 100644 --- a/drivers/power/apm_power.c +++ b/drivers/power/apm_power.c @@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_source source) empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN; now_prop = POWER_SUPPLY_PROP_ENERGY_NOW; avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG; + break; case SOURCE_VOLTAGE: full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX; empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN; diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c index c61ffec2ff1..2a10cd36118 100644 --- a/drivers/power/intel_mid_battery.c +++ b/drivers/power/intel_mid_battery.c @@ -185,8 +185,8 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop) { u32 data[3]; u8 *p = (u8 *)&data[1]; - int err = intel_scu_ipc_command(IPC_CMD_BATTERY_PROPERTY, - IPCMSG_BATTERY, NULL, 0, data, 3); + int err = intel_scu_ipc_command(IPCMSG_BATTERY, + IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3); prop->capacity = data[0]; prop->crnt = *p++; @@ -207,7 +207,7 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop) 
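
/* Illustrative aside on the apm_power hunk above, not part of the patch:
 * without the added break, the first case falls through and the next case
 * silently overwrites the property values just chosen. A tiny standalone
 * demonstration of that fallthrough bug (enum and values invented for the
 * sketch):
 */
#include <assert.h>

enum source { SOURCE_A, SOURCE_B };

static int pick(enum source s, int add_break)
{
        int prop = -1;

        switch (s) {
        case SOURCE_A:
                prop = 1;
                if (add_break)
                        break;          /* the fix: stop here */
                /* fall through */
        case SOURCE_B:
                prop = 2;
                break;
        }
        return prop;
}

int main(void)
{
        assert(pick(SOURCE_A, 0) == 2); /* fallthrough clobbers the choice */
        assert(pick(SOURCE_A, 1) == 1); /* with the break the choice sticks */
        return 0;
}
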
static int pmic_scu_ipc_set_charger(int charger) { - return intel_scu_ipc_simple_command(charger, IPCMSG_BATTERY); + return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger); } /** diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c index 7d149a8d8d9..2ce2eb71d0f 100644 --- a/drivers/regulator/88pm8607.c +++ b/drivers/regulator/88pm8607.c @@ -215,7 +215,7 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index) struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); int ret = -EINVAL; - if (info->vol_table && (index < (2 << info->vol_nbits))) { + if (info->vol_table && (index < (1 << info->vol_nbits))) { ret = info->vol_table[index]; if (info->slope_double) ret <<= 1; @@ -233,7 +233,7 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) max_uV = max_uV >> 1; } if (info->vol_table) { - for (i = 0; i < (2 << info->vol_nbits); i++) { + for (i = 0; i < (1 << info->vol_nbits); i++) { if (!info->vol_table[i]) break; if ((min_uV <= info->vol_table[i]) diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index 11790990277..b349266a43d 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c @@ -634,12 +634,9 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev) "%s: failed to register regulator %s err %d\n", __func__, ab3100_regulator_desc[i].name, err); - i--; /* remove the already registered regulators */ - while (i > 0) { + while (--i >= 0) regulator_unregister(ab3100_regulators[i].rdev); - i--; - } return err; } diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c index dc3f1a49167..28c7ae67cec 100644 --- a/drivers/regulator/ab8500.c +++ b/drivers/regulator/ab8500.c @@ -157,7 +157,7 @@ static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector) if (info->fixed_uV) return info->fixed_uV; - if (selector > info->voltages_len) + if (selector >= info->voltages_len) return -EINVAL; return info->supported_voltages[selector]; @@ -344,13 +344,14 @@ static inline struct ab8500_regulator_info *find_regulator_info(int id) static __devinit int ab8500_regulator_probe(struct platform_device *pdev) { struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); - struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev); + struct ab8500_platform_data *pdata; int i, err; if (!ab8500) { dev_err(&pdev->dev, "null mfd parent\n"); return -EINVAL; } + pdata = dev_get_platdata(ab8500->dev); /* register all regulators */ for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { @@ -368,11 +369,9 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to register regulator %s\n", info->desc.name); /* when we fail, un-register all earlier regulators */ - i--; - while (i > 0) { + while (--i >= 0) { info = &ab8500_regulator_info[i]; regulator_unregister(info->regulator); - i--; } return err; } diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c index d59d2f2314a..df1fb53c09d 100644 --- a/drivers/regulator/ad5398.c +++ b/drivers/regulator/ad5398.c @@ -25,7 +25,7 @@ struct ad5398_chip_info { unsigned int current_level; unsigned int current_mask; unsigned int current_offset; - struct regulator_dev rdev; + struct regulator_dev *rdev; }; static int ad5398_calc_current(struct ad5398_chip_info *chip, @@ -211,7 +211,6 @@ MODULE_DEVICE_TABLE(i2c, ad5398_id); static int __devinit ad5398_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct regulator_dev *rdev; struct 
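
/* Illustrative aside on the ab3100/ab8500 hunks above, not part of the patch:
 * when registering entry i fails, everything registered so far (indices
 * 0 .. i-1) must be torn down; the old "i--; while (i > 0)" loop never
 * unregistered index 0. A standalone model of the corrected unwind, with
 * invented stub names:
 */
#include <assert.h>

#define N 4

static int registered[N];

static void unregister_stub(int i)
{
        registered[i] = 0;
}

static void unwind(int failed_at)
{
        int i = failed_at;

        while (--i >= 0)                /* unregisters failed_at-1 .. 0 */
                unregister_stub(i);
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++)         /* pretend 0..2 registered, 3 failed */
                registered[i] = 1;
        unwind(3);
        for (i = 0; i < N; i++)
                assert(registered[i] == 0);
        return 0;
}
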
regulator_init_data *init_data = client->dev.platform_data; struct ad5398_chip_info *chip; const struct ad5398_current_data_format *df = @@ -233,9 +232,10 @@ static int __devinit ad5398_probe(struct i2c_client *client, chip->current_offset = df->current_offset; chip->current_mask = (chip->current_level - 1) << chip->current_offset; - rdev = regulator_register(&ad5398_reg, &client->dev, init_data, chip); - if (IS_ERR(rdev)) { - ret = PTR_ERR(rdev); + chip->rdev = regulator_register(&ad5398_reg, &client->dev, + init_data, chip); + if (IS_ERR(chip->rdev)) { + ret = PTR_ERR(chip->rdev); dev_err(&client->dev, "failed to register %s %s\n", id->name, ad5398_reg.name); goto err; @@ -254,7 +254,7 @@ static int __devexit ad5398_remove(struct i2c_client *client) { struct ad5398_chip_info *chip = i2c_get_clientdata(client); - regulator_unregister(&chip->rdev); + regulator_unregister(chip->rdev); kfree(chip); i2c_set_clientdata(client, NULL); diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c index e49d2bd393f..d61ecb885a8 100644 --- a/drivers/regulator/isl6271a-regulator.c +++ b/drivers/regulator/isl6271a-regulator.c @@ -165,7 +165,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c, mutex_init(&pmic->mtx); for (i = 0; i < 3; i++) { - pmic->rdev[i] = regulator_register(&isl_rd[0], &i2c->dev, + pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev, init_data, pmic); if (IS_ERR(pmic->rdev[i])) { dev_err(&i2c->dev, "failed to register %s\n", id->name); diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c index 8867c2710a6..559cfa271a4 100644 --- a/drivers/regulator/max1586.c +++ b/drivers/regulator/max1586.c @@ -121,14 +121,14 @@ static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV) if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV) return -EINVAL; - if (min_uV >= 3000000) - selector = 3; - if (min_uV < 3000000) - selector = 2; - if (min_uV < 2500000) - selector = 1; if (min_uV < 1800000) selector = 0; + else if (min_uV < 2500000) + selector = 1; + else if (min_uV < 3000000) + selector = 2; + else if (min_uV >= 3000000) + selector = 3; if (max1586_v6_calc_voltage(selector) > max_uV) return -EINVAL; diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c index ab67298799f..a1baf1fbe00 100644 --- a/drivers/regulator/max8998.c +++ b/drivers/regulator/max8998.c @@ -549,7 +549,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) if (!max8998) return -ENOMEM; - size = sizeof(struct regulator_dev *) * (pdata->num_regulators + 1); + size = sizeof(struct regulator_dev *) * pdata->num_regulators; max8998->rdev = kzalloc(size, GFP_KERNEL); if (!max8998->rdev) { kfree(max8998); @@ -557,7 +557,9 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) } rdev = max8998->rdev; + max8998->dev = &pdev->dev; max8998->iodev = iodev; + max8998->num_regulators = pdata->num_regulators; platform_set_drvdata(pdev, max8998); for (i = 0; i < pdata->num_regulators; i++) { @@ -583,7 +585,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) return 0; err: - for (i = 0; i <= max8998->num_regulators; i++) + for (i = 0; i < max8998->num_regulators; i++) if (rdev[i]) regulator_unregister(rdev[i]); @@ -599,7 +601,7 @@ static int __devexit max8998_pmic_remove(struct platform_device *pdev) struct regulator_dev **rdev = max8998->rdev; int i; - for (i = 0; i <= max8998->num_regulators; i++) + for (i = 0; i < max8998->num_regulators; i++) if (rdev[i]) 
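
/* Illustrative aside on the isl6271a hunk above, not part of the patch: the
 * registration loop indexed the descriptor table with a constant 0 instead of
 * the loop counter, so all three regulators were registered against the same
 * descriptor. A minimal standalone model of that indexing fix (stub data,
 * invented names):
 */
#include <assert.h>

int main(void)
{
        const char *isl_rd_names[3] = { "ldo1", "ldo2", "ldo3" };
        const char *registered[3];
        int i;

        for (i = 0; i < 3; i++)
                registered[i] = isl_rd_names[i];    /* was isl_rd_names[0] */

        assert(registered[0] == isl_rd_names[0]);
        assert(registered[2] == isl_rd_names[2]);   /* distinct descriptors */
        return 0;
}
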
regulator_unregister(rdev[i]); diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c index c239f42aa4a..020f5878d7f 100644 --- a/drivers/regulator/tps6507x-regulator.c +++ b/drivers/regulator/tps6507x-regulator.c @@ -626,12 +626,6 @@ fail: return error; } -/** - * tps6507x_remove - TPS6507x driver i2c remove handler - * @client: i2c driver client device structure - * - * Unregister TPS driver as an i2c client device driver - */ static int __devexit tps6507x_pmic_remove(struct platform_device *pdev) { struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c index 8cff1413a14..51237fbb1bb 100644 --- a/drivers/regulator/tps6586x-regulator.c +++ b/drivers/regulator/tps6586x-regulator.c @@ -133,7 +133,7 @@ static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev) mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift; val = (val & mask) >> ri->volt_shift; - if (val > ri->desc.n_voltages) + if (val >= ri->desc.n_voltages) BUG(); return ri->voltages[val] * 1000; @@ -150,7 +150,7 @@ static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev, if (ret) return ret; - return tps6586x_set_bits(parent, ri->go_reg, ri->go_bit); + return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit); } static int tps6586x_regulator_enable(struct regulator_dev *rdev) diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c index e686cdb61b9..9edf8f69234 100644 --- a/drivers/regulator/wm831x-ldo.c +++ b/drivers/regulator/wm831x-ldo.c @@ -215,8 +215,7 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev, case REGULATOR_MODE_IDLE: ret = wm831x_set_bits(wm831x, ctrl_reg, - WM831X_LDO1_LP_MODE, - WM831X_LDO1_LP_MODE); + WM831X_LDO1_LP_MODE, 0); if (ret < 0) return ret; @@ -225,10 +224,12 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev, WM831X_LDO1_ON_MODE); if (ret < 0) return ret; + break; case REGULATOR_MODE_STANDBY: ret = wm831x_set_bits(wm831x, ctrl_reg, - WM831X_LDO1_LP_MODE, 0); + WM831X_LDO1_LP_MODE, + WM831X_LDO1_LP_MODE); if (ret < 0) return ret; diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c index 0e6ed7db936..fe4b8a8a9df 100644 --- a/drivers/regulator/wm8350-regulator.c +++ b/drivers/regulator/wm8350-regulator.c @@ -1129,7 +1129,7 @@ static unsigned int wm8350_dcdc_get_mode(struct regulator_dev *rdev) mode = REGULATOR_MODE_NORMAL; } else if (!active && !sleep) mode = REGULATOR_MODE_IDLE; - else if (!sleep) + else if (sleep) mode = REGULATOR_MODE_STANDBY; return mode; diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index b7de02525ec..85cf607fc78 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device) if (!blkdat->request_queue) return -ENOMEM; - elevator_exit(blkdat->request_queue->elevator); - rc = elevator_init(blkdat->request_queue, "noop"); + rc = elevator_change(blkdat->request_queue, "noop"); if (rc) goto cleanup_queue; diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 7d4d2275573..7f11f3e48e1 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -300,8 +300,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); - int len = 0; - int status; + int status = 0; 
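
/* Illustrative aside on the tps6586x hunk above, not part of the patch: the
 * set-bits helper evidently takes a bit mask, so a bit *number* must be
 * shifted into place first; passing go_bit directly would set whichever low
 * bits happen to encode that number. A standalone model with an invented
 * helper:
 */
#include <assert.h>
#include <stdint.h>

static uint8_t reg;

static void set_bits_stub(uint8_t mask)
{
        reg |= mask;
}

int main(void)
{
        unsigned int go_bit = 6;        /* we want bit 6, i.e. mask 0x40 */

        reg = 0;
        set_bits_stub(1u << go_bit);    /* correct: mask for bit 6 */
        assert(reg == 0x40);

        reg = 0;
        set_bits_stub(go_bit);          /* bug shape: sets bits 1 and 2 */
        assert(reg == 0x06);
        return 0;
}
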
SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); switch (param) { @@ -315,7 +314,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, default: return iscsi_host_get_param(shost, param, buf); } - return len; + return status; } int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 26350e470bc..877324fc594 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -368,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba, memset(req, 0, sizeof(*req)); wrb->tag0 |= tag; - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1); + be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, sizeof(*req)); diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index cd05e049d5f..d0c82340f0e 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c @@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd) { struct scsi_sense_hdr sshdr; - scmd_printk(KERN_INFO, cmd, ""); + scmd_printk(KERN_INFO, cmd, " "); scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sshdr); scsi_show_sense_hdr(&sshdr); scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sshdr); - scmd_printk(KERN_INFO, cmd, ""); + scmd_printk(KERN_INFO, cmd, " "); scsi_show_extd_sense(sshdr.asc, sshdr.ascq); } EXPORT_SYMBOL(scsi_print_sense); @@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result); void scsi_print_result(struct scsi_cmnd *cmd) { - scmd_printk(KERN_INFO, cmd, ""); + scmd_printk(KERN_INFO, cmd, " "); scsi_show_result(cmd->result); } EXPORT_SYMBOL(scsi_print_result); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4f5551b5fe5..c5d0606ad09 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) misc_fw_support = readl(&cfgtable->misc_fw_support); use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; + /* The doorbell reset seems to cause lockups on some Smart + * Arrays (e.g. P410, P410i, maybe others). Until this is + * fixed or at least isolated, avoid the doorbell reset. 
+ */ + use_doorbell = 0; + rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) goto unmap_cfgtable; diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index fda4de3440c..e88bbdde49c 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or, { _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); WARN_ON(or->in.bio || or->in.total_bytes); - WARN_ON(1 == (bio->bi_rw & REQ_WRITE)); + WARN_ON(bio->bi_rw & REQ_WRITE); or->in.bio = bio; or->in.total_bytes = len; } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 420238cc794..114bc5a8117 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) qla24xx_disable_vp(vha); + vha->flags.delete_progress = 1; + fc_remove_host(vha->host); scsi_remove_host(vha->host); - qla2x00_free_fcports(vha); + if (vha->timer_active) { + qla2x00_vp_stop_timer(vha); + DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" + " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); + } qla24xx_deallocate_vp_id(vha); + /* No pending activities shall be there on the vha now */ + DEBUG(msleep(random32()%10)); /* Just to see if something falls on + * the net we have placed below */ + + BUG_ON(atomic_read(&vha->vref_count)); + + qla2x00_free_fcports(vha); + mutex_lock(&ha->vport_lock); ha->cur_vport_count--; clear_bit(vha->vp_idx, ha->vp_idx_map); mutex_unlock(&ha->vport_lock); - if (vha->timer_active) { - qla2x00_vp_stop_timer(vha); - DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p " - "has stopped\n", - vha->host_no, vha->vp_idx, vha)); - } - if (vha->req->id && !ha->flags.cpu_affinity_enabled) { if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) qla_printk(KERN_WARNING, ha, diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 6cfc28a25eb..b74e6b5743d 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -29,8 +29,6 @@ /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ /* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */ -/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */ - /* * Macros use for debugging the driver. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 3a432ea0c7a..d2a4e153070 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2641,6 +2641,7 @@ struct qla_hw_data { #define MBX_UPDATE_FLASH_ACTIVE 3 struct mutex vport_lock; /* Virtual port synchronization */ + spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */ struct completion mbx_cmd_comp; /* Serialize mbx access */ struct completion mbx_intr_comp; /* Used for completion notification */ struct completion dcbx_comp; /* For set port config notification */ @@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host { uint32_t management_server_logged_in :1; uint32_t process_response_queue :1; uint32_t difdix_supported:1; + uint32_t delete_progress:1; } flags; atomic_t loop_state; @@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host { struct req_que *req; int fw_heartbeat_counter; int seconds_since_last_heartbeat; + + atomic_t vref_count; } scsi_qla_host_t; /* @@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host { test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ atomic_read(&ha->loop_state) == LOOP_DOWN) +#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ + atomic_inc(&__vha->vref_count); \ + mb(); \ + if (__vha->flags.delete_progress) { \ + atomic_dec(&__vha->vref_count); \ + __bail = 1; \ + } else { \ + __bail = 0; \ + } \ +} while (0) + +#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ + atomic_dec(&__vha->vref_count); \ +} while (0) + + #define qla_printk(level, ha, format, arg...) \ dev_printk(level , &((ha)->pdev->dev) , format , ## arg) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d863ed2619b..9c383baebe2 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp) { struct srb_ctx *ctx = sp->ctx; struct srb_iocb *iocb = ctx->u.iocb_cmd; + struct scsi_qla_host *vha = sp->fcport->vha; del_timer_sync(&iocb->timer); kfree(iocb); kfree(ctx); mempool_free(sp, sp->fcport->vha->hw->srb_mempool); + + QLA_VHA_MARK_NOT_BUSY(vha); } inline srb_t * qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, unsigned long tmo) { - srb_t *sp; + srb_t *sp = NULL; struct qla_hw_data *ha = vha->hw; struct srb_ctx *ctx; struct srb_iocb *iocb; + uint8_t bail; + + QLA_VHA_MARK_BUSY(vha, bail); + if (bail) + return NULL; sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); if (!sp) @@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, iocb->timer.function = qla2x00_ctx_sp_timeout; add_timer(&iocb->timer); done: + if (!sp) + QLA_VHA_MARK_NOT_BUSY(vha); return sp; } @@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha) qla2x00_init_response_q_entries(rsp); } + spin_lock_irqsave(&ha->vport_slock, flags); /* Clear RSCN queue. */ list_for_each_entry(vp, &ha->vp_list, list) { vp->rscn_in_ptr = 0; vp->rscn_out_ptr = 0; } + + spin_unlock_irqrestore(&ha->vport_slock, flags); + ha->isp_ops->config_rings(vha); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, /* Bypass virtual ports of the same host. 
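
/* Illustrative aside on the qla_def.h hunk above, not part of the patch: the
 * QLA_VHA_MARK_BUSY/QLA_VHA_MARK_NOT_BUSY pair is a "take a reference, then
 * back out if a delete is already in progress" gate, which lets the delete
 * path wait for vref_count to drain. A small C11 model of that shape, with
 * names invented for the sketch:
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct vport {
        atomic_int  vref_count;
        atomic_bool delete_progress;
};

static bool mark_busy(struct vport *v)
{
        atomic_fetch_add(&v->vref_count, 1);
        if (atomic_load(&v->delete_progress)) {
                atomic_fetch_sub(&v->vref_count, 1);    /* bail out */
                return false;
        }
        return true;
}

static void mark_not_busy(struct vport *v)
{
        atomic_fetch_sub(&v->vref_count, 1);
}

int main(void)
{
        struct vport v = { 0 };

        assert(mark_busy(&v));                  /* normal path takes a ref */
        mark_not_busy(&v);

        atomic_store(&v.delete_progress, true); /* teardown has started */
        assert(!mark_busy(&v));                 /* new work is refused */
        assert(atomic_load(&v.vref_count) == 0);
        return 0;
}
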
*/ found = 0; if (ha->num_vhosts) { + unsigned long flags; + + spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { if (new_fcport->d_id.b24 == vp->d_id.b24) { found = 1; break; } } + spin_unlock_irqrestore(&ha->vport_slock, flags); + if (found) continue; } @@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp; struct scsi_qla_host *tvp; + unsigned long flags = 0; rval = QLA_SUCCESS; @@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) /* Check for loop ID being already in use. */ found = 0; fcport = NULL; + + spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { list_for_each_entry(fcport, &vp->vp_fcports, list) { if (fcport->loop_id == dev->loop_id && @@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) if (found) break; } + spin_unlock_irqrestore(&ha->vport_slock, flags); /* If not in use then it is free to use. */ if (!found) { @@ -3791,14 +3814,27 @@ void qla2x00_update_fcports(scsi_qla_host_t *base_vha) { fc_port_t *fcport; - struct scsi_qla_host *tvp, *vha; + struct scsi_qla_host *vha; + struct qla_hw_data *ha = base_vha->hw; + unsigned long flags; + spin_lock_irqsave(&ha->vport_slock, flags); /* Go with deferred removal of rport references. */ - list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) - list_for_each_entry(fcport, &vha->vp_fcports, list) + list_for_each_entry(vha, &base_vha->hw->vp_list, list) { + atomic_inc(&vha->vref_count); + list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport && fcport->drport && - atomic_read(&fcport->state) != FCS_UNCONFIGURED) + atomic_read(&fcport->state) != FCS_UNCONFIGURED) { + spin_unlock_irqrestore(&ha->vport_slock, flags); + qla2x00_rport_del(fcport); + + spin_lock_irqsave(&ha->vport_slock, flags); + } + } + atomic_dec(&vha->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); } void @@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); - struct scsi_qla_host *tvp; + unsigned long flags; vha->flags.online = 0; ha->flags.chip_reset_done = 0; @@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); qla2x00_mark_all_devices_lost(vha, 0); - list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list) + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &base_vha->hw->vp_list, list) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + qla2x00_mark_all_devices_lost(vp, 0); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, @@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) uint8_t status = 0; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp; - struct scsi_qla_host *tvp; struct req_que *req = ha->req_q_map[0]; + unsigned long flags; if (vha->flags.online) { qla2x00_abort_isp_cleanup(vha); @@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) DEBUG(printk(KERN_INFO "qla2x00_abort_isp(%ld): succeeded.\n", vha->host_no)); - list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { - if (vp->vp_idx) + + 
spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + if (vp->vp_idx) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + qla2x00_vp_abort_isp(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } } + spin_unlock_irqrestore(&ha->vport_slock, flags); + } else { qla_printk(KERN_INFO, ha, "qla2x00_abort_isp: **** FAILED ****\n"); @@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; struct scsi_qla_host *vp; - struct scsi_qla_host *tvp; + unsigned long flags; status = qla2x00_init_rings(vha); if (!status) { @@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) DEBUG(printk(KERN_INFO "qla82xx_restart_isp(%ld): succeeded.\n", vha->host_no)); - list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { - if (vp->vp_idx) + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + if (vp->vp_idx) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + qla2x00_vp_abort_isp(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } } + spin_unlock_irqrestore(&ha->vport_slock, flags); + } else { qla_printk(KERN_INFO, ha, "qla82xx_restart_isp: **** FAILED ****\n"); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 6982ba70e12..28f65be19da 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) cp->result = DID_ERROR << 16; break; } - } else if (!lscsi_status) { + } else { DEBUG2(qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " "of 0x%x bytes).\n", vha->host_no, cp->device->id, cp->device->lun, resid, scsi_bufflen(cp))); - cp->result = DID_ERROR << 16; - break; + cp->result = DID_ERROR << 16 | lscsi_status; + goto check_scsi_status; } cp->result = DID_OK << 16 | lscsi_status; logit = 0; +check_scsi_status: /* * Check to see if SCSI Status is non zero. If so report SCSI * Status. 
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 6009b0c6948..a595ec8264f 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; - scsi_qla_host_t *tvp; + unsigned long flags; if (rptid_entry->entry_status != 0) return; @@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, return; } - list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) if (vp_idx == vp->vp_idx) break; + spin_unlock_irqrestore(&ha->vport_slock, flags); + if (!vp) return; diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 987c5b0ca78..2b69392a71a 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) { uint32_t vp_id; struct qla_hw_data *ha = vha->hw; + unsigned long flags; /* Find an empty slot and assign an vp_id */ mutex_lock(&ha->vport_lock); @@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) set_bit(vp_id, ha->vp_idx_map); ha->num_vhosts++; vha->vp_idx = vp_id; + + spin_lock_irqsave(&ha->vport_slock, flags); list_add_tail(&vha->list, &ha->vp_list); + spin_unlock_irqrestore(&ha->vport_slock, flags); + mutex_unlock(&ha->vport_lock); return vp_id; } @@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) { uint16_t vp_id; struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; mutex_lock(&ha->vport_lock); + /* + * Wait for all pending activities to finish before removing vport from + * the list. + * Lock needs to be held for safe removal from the list (it + * ensures no active vp_list traversal while the vport is removed + * from the queue) + */ + spin_lock_irqsave(&ha->vport_slock, flags); + while (atomic_read(&vha->vref_count)) { + spin_unlock_irqrestore(&ha->vport_slock, flags); + + msleep(500); + + spin_lock_irqsave(&ha->vport_slock, flags); + } + list_del(&vha->list); + spin_unlock_irqrestore(&ha->vport_slock, flags); + vp_id = vha->vp_idx; ha->num_vhosts--; clear_bit(vp_id, ha->vp_idx_map); - list_del(&vha->list); + mutex_unlock(&ha->vport_lock); } @@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) { scsi_qla_host_t *vha; struct scsi_qla_host *tvha; + unsigned long flags; + spin_lock_irqsave(&ha->vport_slock, flags); /* Locate matching device in database. */ list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { - if (!memcmp(port_name, vha->port_name, WWN_SIZE)) + if (!memcmp(port_name, vha->port_name, WWN_SIZE)) { + spin_unlock_irqrestore(&ha->vport_slock, flags); return vha; + } } + spin_unlock_irqrestore(&ha->vport_slock, flags); return NULL; } @@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) static void qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) { + /* + * !!! NOTE !!! + * This function, if called in contexts other than vp create, disable + * or delete, please make sure this is synchronized with the + * delete thread. 
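
/* Illustrative aside on the qla_mid.c hunk above, not part of the patch: the
 * vport delete path drops the list lock between polls while it waits for the
 * reference count to drain, and only unlinks the vport once no traversal
 * still holds a reference. A small pthread model of that drain loop (invented
 * names, arbitrary sleep lengths standing in for msleep(500)):
 */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <unistd.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int vref_count;
static int on_list = 1;

static void *reader(void *arg)
{
        (void)arg;
        atomic_fetch_add(&vref_count, 1);       /* traversal took a ref */
        usleep(50 * 1000);                      /* ... uses the vport ... */
        atomic_fetch_sub(&vref_count, 1);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, reader, NULL);
        usleep(10 * 1000);                      /* let the reader grab a ref */

        pthread_mutex_lock(&list_lock);
        while (atomic_load(&vref_count)) {      /* drain before unlinking */
                pthread_mutex_unlock(&list_lock);
                usleep(10 * 1000);
                pthread_mutex_lock(&list_lock);
        }
        on_list = 0;                            /* stand-in for list_del() */
        pthread_mutex_unlock(&list_lock);

        pthread_join(t, NULL);
        assert(!on_list && atomic_load(&vref_count) == 0);
        return 0;
}
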
+ */ fc_port_t *fcport; list_for_each_entry(fcport, &vha->vp_fcports, list) { @@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) "loop_id=0x%04x :%x\n", vha->host_no, fcport->loop_id, fcport->vp_idx)); - atomic_set(&fcport->state, FCS_DEVICE_DEAD); qla2x00_mark_device_lost(vha, fcport, 0, 0); atomic_set(&fcport->state, FCS_UNCONFIGURED); } @@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha) void qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) { - scsi_qla_host_t *vha, *tvha; + scsi_qla_host_t *vha; struct qla_hw_data *ha = rsp->hw; int i = 0; + unsigned long flags; - list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vha, &ha->vp_list, list) { if (vha->vp_idx) { + atomic_inc(&vha->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + switch (mb[0]) { case MBA_LIP_OCCURRED: case MBA_LOOP_UP: @@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) qla2x00_async_event(vha, rsp, mb); break; } + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vha->vref_count); } i++; } + spin_unlock_irqrestore(&ha->vport_slock, flags); } int @@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) int ret; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; - struct scsi_qla_host *tvp; + unsigned long flags = 0; if (vha->vp_idx) return; @@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) if (!(ha->current_topology & ISP_CFG_F)) return; - list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { - if (vp->vp_idx) + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + if (vp->vp_idx) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + ret = qla2x00_do_dpc_vp(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } } + spin_unlock_irqrestore(&ha->vport_slock, flags); } int diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 915b77a6e19..0a71cc71eab 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp) sufficient_dsds: req_cnt = 1; + if (req->cnt < (req_cnt + 2)) { + cnt = (uint16_t)RD_REG_DWORD_RELAXED( + ®->req_q_out[0]); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + } + + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); if (!sp->ctx) { DEBUG(printk(KERN_INFO @@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } qla2xxx_wake_dpc(vha); + ha->flags.fw_hung = 1; if (ha->flags.mbox_busy) { - ha->flags.fw_hung = 1; ha->flags.mbox_int = 1; DEBUG2(qla_printk(KERN_ERR, ha, - "Due to fw hung, doing premature " - "completion of mbx command\n")); - complete(&ha->mbx_intr_comp); + "Due to fw hung, doing premature " + "completion of mbx command\n")); + if (test_bit(MBX_INTR_WAIT, + &ha->mbx_cmd_flags)) + complete(&ha->mbx_intr_comp); } } - } + } else + vha->seconds_since_last_heartbeat = 0; vha->fw_heartbeat_counter = fw_heartbeat_counter; } @@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) "%s(): Adapter reset needed!\n", __func__); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); + ha->flags.fw_hung = 1; if (ha->flags.mbox_busy) { - ha->flags.fw_hung = 1; ha->flags.mbox_int = 1; 
DEBUG2(qla_printk(KERN_ERR, ha, - "Need reset, doing premature " - "completion of mbx command\n")); - complete(&ha->mbx_intr_comp); + "Need reset, doing premature " + "completion of mbx command\n")); + if (test_bit(MBX_INTR_WAIT, + &ha->mbx_cmd_flags)) + complete(&ha->mbx_intr_comp); } } else { qla82xx_check_fw_alive(vha); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8c80b49ac1c..1e4bff69525 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2341,16 +2341,28 @@ probe_out: static void qla2x00_remove_one(struct pci_dev *pdev) { - scsi_qla_host_t *base_vha, *vha, *temp; + scsi_qla_host_t *base_vha, *vha; struct qla_hw_data *ha; + unsigned long flags; base_vha = pci_get_drvdata(pdev); ha = base_vha->hw; - list_for_each_entry_safe(vha, temp, &ha->vp_list, list) { - if (vha && vha->fc_vport) + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vha, &ha->vp_list, list) { + atomic_inc(&vha->vref_count); + + if (vha && vha->fc_vport) { + spin_unlock_irqrestore(&ha->vport_slock, flags); + fc_vport_terminate(vha->fc_vport); + + spin_lock_irqsave(&ha->vport_slock, flags); + } + + atomic_dec(&vha->vref_count); } + spin_unlock_irqrestore(&ha->vport_slock, flags); set_bit(UNLOADING, &base_vha->dpc_flags); @@ -2975,10 +2987,17 @@ static struct qla_work_evt * qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) { struct qla_work_evt *e; + uint8_t bail; + + QLA_VHA_MARK_BUSY(vha, bail); + if (bail) + return NULL; e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); - if (!e) + if (!e) { + QLA_VHA_MARK_NOT_BUSY(vha); return NULL; + } INIT_LIST_HEAD(&e->list); e->type = type; @@ -3135,6 +3154,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) } if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); + + /* For each work completed decrement vha ref count */ + QLA_VHA_MARK_NOT_BUSY(vha); } } diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index e75ccb91317..8edbccb3232 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.03-k0" +#define QLA2XXX_VERSION "8.03.04-k0" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 -#define QLA_DRIVER_PATCH_VER 3 +#define QLA_DRIVER_PATCH_VER 4 #define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9ade720422c..ee02d3838a0 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1011,8 +1011,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) err_exit: scsi_release_buffers(cmd); - scsi_put_command(cmd); cmd->request->special = NULL; + scsi_put_command(cmd); return error; } EXPORT_SYMBOL(scsi_init_io); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 2714becc2ea..ffa0689ee84 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -870,7 +870,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode) SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); - if (atomic_dec_return(&sdkp->openers) && sdev->removable) { + if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) { if (scsi_block_when_processing_errors(sdev)) scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); } @@ -2625,15 +2625,15 @@ module_exit(exit_sd); static void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) { - sd_printk(KERN_INFO, sdkp, ""); + sd_printk(KERN_INFO, sdkp, " "); scsi_show_sense_hdr(sshdr); - sd_printk(KERN_INFO, sdkp, ""); + 
sd_printk(KERN_INFO, sdkp, " "); scsi_show_extd_sense(sshdr->asc, sshdr->ascq); } static void sd_print_result(struct scsi_disk *sdkp, int result) { - sd_printk(KERN_INFO, sdkp, ""); + sd_printk(KERN_INFO, sdkp, " "); scsi_show_result(result); } diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index a7bc8b7b09a..2c3e89ddf06 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -72,10 +72,7 @@ static void sym_printl_hex(u_char *p, int n) static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) { - if (label) - sym_print_addr(cp->cmd, "%s: ", label); - else - sym_print_addr(cp->cmd, ""); + sym_print_addr(cp->cmd, "%s: ", label); spi_print_msg(msg); printf("\n"); @@ -4558,7 +4555,8 @@ static void sym_int_sir(struct sym_hcb *np) switch (np->msgin [2]) { case M_X_MODIFY_DP: if (DEBUG_FLAGS & DEBUG_POINTER) - sym_print_msg(cp, NULL, np->msgin); + sym_print_msg(cp, "extended msg ", + np->msgin); tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + (np->msgin[5]<<8) + (np->msgin[6]); sym_modify_dp(np, tp, cp, tmp); @@ -4585,7 +4583,7 @@ static void sym_int_sir(struct sym_hcb *np) */ case M_IGN_RESIDUE: if (DEBUG_FLAGS & DEBUG_POINTER) - sym_print_msg(cp, NULL, np->msgin); + sym_print_msg(cp, "1 or 2 byte ", np->msgin); if (cp->host_flags & HF_SENSE) OUTL_DSP(np, SCRIPTA_BA(np, clrack)); else diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index 8dedb266f14..c4399e23565 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c @@ -500,6 +500,7 @@ static int __init mpc512x_psc_fifoc_init(void) psc_fifoc = of_iomap(np, 0); if (!psc_fifoc) { pr_err("%s: Can't map FIFOC\n", __func__); + of_node_put(np); return -ENODEV; } diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index 141c69554bd..7d475b2a79e 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c @@ -335,8 +335,6 @@ static int serial_probe(struct pcmcia_device *link) info->p_dev = link; link->priv = info; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - link->resource[0]->end = 8; link->conf.Attributes = CONF_ENABLE_IRQ; if (do_sound) { link->conf.Attributes |= CONF_ENABLE_SPKR; @@ -411,6 +409,27 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info, /*====================================================================*/ +static int pfc_config(struct pcmcia_device *p_dev) +{ + unsigned int port = 0; + struct serial_info *info = p_dev->priv; + + if ((p_dev->resource[1]->end != 0) && + (resource_size(p_dev->resource[1]) == 8)) { + port = p_dev->resource[1]->start; + info->slave = 1; + } else if ((info->manfid == MANFID_OSITECH) && + (resource_size(p_dev->resource[0]) == 0x40)) { + port = p_dev->resource[0]->start + 0x28; + info->slave = 1; + } + if (info->slave) + return setup_serial(p_dev, info, port, p_dev->irq); + + dev_warn(&p_dev->dev, "no usable port range found, giving up\n"); + return -ENODEV; +} + static int simple_config_check(struct pcmcia_device *p_dev, cistpl_cftable_entry_t *cf, cistpl_cftable_entry_t *dflt, @@ -461,23 +480,8 @@ static int simple_config(struct pcmcia_device *link) struct serial_info *info = link->priv; int i = -ENODEV, try; - /* If the card is already configured, look up the port and irq */ - if (link->function_config) { - unsigned int port = 0; - if ((link->resource[1]->end != 0) && - (resource_size(link->resource[1]) == 8)) { - port = link->resource[1]->end; - info->slave = 1; - } else if ((info->manfid == 
MANFID_OSITECH) && - (resource_size(link->resource[0]) == 0x40)) { - port = link->resource[0]->start + 0x28; - info->slave = 1; - } - if (info->slave) { - return setup_serial(link, info, port, - link->irq); - } - } + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + link->resource[0]->end = 8; /* First pass: look for a config entry that looks normal. * Two tries: without IO aliases, then with aliases */ @@ -491,8 +495,7 @@ static int simple_config(struct pcmcia_device *link) if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL)) goto found_port; - printk(KERN_NOTICE - "serial_cs: no usable port range found, giving up\n"); + dev_warn(&link->dev, "no usable port range found, giving up\n"); return -1; found_port: @@ -558,6 +561,7 @@ static int multi_config(struct pcmcia_device *link) int i, base2 = 0; /* First, look for a generic full-sized window */ + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; link->resource[0]->end = info->multi * 8; if (pcmcia_loop_config(link, multi_config_check, &base2)) { /* If that didn't work, look for two windows */ @@ -565,15 +569,14 @@ static int multi_config(struct pcmcia_device *link) info->multi = 2; if (pcmcia_loop_config(link, multi_config_check_notpicky, &base2)) { - printk(KERN_NOTICE "serial_cs: no usable port range" + dev_warn(&link->dev, "no usable port range " "found, giving up\n"); return -ENODEV; } } if (!link->irq) - dev_warn(&link->dev, - "serial_cs: no usable IRQ found, continuing...\n"); + dev_warn(&link->dev, "no usable IRQ found, continuing...\n"); /* * Apply any configuration quirks. @@ -675,6 +678,7 @@ static int serial_config(struct pcmcia_device * link) multifunction cards that ask for appropriate IO port ranges */ if ((info->multi == 0) && (link->has_func_id) && + (link->socket->pcmcia_pfc == 0) && ((link->func_id == CISTPL_FUNCID_MULTI) || (link->func_id == CISTPL_FUNCID_SERIAL))) pcmcia_loop_config(link, serial_check_for_multi, info); @@ -685,7 +689,13 @@ static int serial_config(struct pcmcia_device * link) if (info->quirk && info->quirk->multi != -1) info->multi = info->quirk->multi; - if (info->multi > 1) + dev_info(&link->dev, + "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n", + link->manf_id, link->card_id, + link->socket->pcmcia_pfc, info->multi, info->quirk); + if (link->socket->pcmcia_pfc) + i = pfc_config(link); + else if (info->multi > 1) i = multi_config(link); else i = simple_config(link); @@ -704,7 +714,7 @@ static int serial_config(struct pcmcia_device * link) return 0; failed: - dev_warn(&link->dev, "serial_cs: failed to initialize\n"); + dev_warn(&link->dev, "failed to initialize\n"); serial_remove(link); return -ENODEV; } diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index acd35d1ebd1..4c37c4e2864 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c @@ -503,8 +503,9 @@ static void giveback(struct pl022 *pl022) msg->state = NULL; if (msg->complete) msg->complete(msg->context); - /* This message is completed, so let's turn off the clock! */ + /* This message is completed, so let's turn off the clocks! 
*/ clk_disable(pl022->clk); + amba_pclk_disable(pl022->adev); } /** @@ -1139,9 +1140,10 @@ static void pump_messages(struct work_struct *work) /* Setup the SPI using the per chip configuration */ pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); /* - * We enable the clock here, then the clock will be disabled when + * We enable the clocks here, then the clocks will be disabled when * giveback() is called in each method (poll/interrupt/DMA) */ + amba_pclk_enable(pl022->adev); clk_enable(pl022->clk); restore_state(pl022); flush(pl022); @@ -1786,11 +1788,9 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) } /* Disable SSP */ - clk_enable(pl022->clk); writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); load_ssp_default_config(pl022); - clk_disable(pl022->clk); status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", pl022); @@ -1818,6 +1818,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) goto err_spi_register; } dev_dbg(dev, "probe succeded\n"); + /* Disable the silicon block pclk and clock it when needed */ + amba_pclk_disable(adev); return 0; err_spi_register: @@ -1879,9 +1881,9 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state) return status; } - clk_enable(pl022->clk); + amba_pclk_enable(adev); load_ssp_default_config(pl022); - clk_disable(pl022->clk); + amba_pclk_disable(adev); dev_dbg(&adev->dev, "suspended\n"); return 0; } @@ -1981,7 +1983,7 @@ static int __init pl022_init(void) return amba_driver_register(&pl022_driver); } -module_init(pl022_init); +subsys_initcall(pl022_init); static void __exit pl022_exit(void) { diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c index d256cb00604..56247853c29 100644 --- a/drivers/spi/dw_spi.c +++ b/drivers/spi/dw_spi.c @@ -181,10 +181,6 @@ static void flush(struct dw_spi *dws) wait_till_not_busy(dws); } -static void null_cs_control(u32 command) -{ -} - static int null_writer(struct dw_spi *dws) { u8 n_bytes = dws->n_bytes; @@ -322,7 +318,7 @@ static void giveback(struct dw_spi *dws) struct spi_transfer, transfer_list); - if (!last_transfer->cs_change) + if (!last_transfer->cs_change && dws->cs_control) dws->cs_control(MRST_SPI_DEASSERT); msg->state = NULL; @@ -396,6 +392,11 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) static irqreturn_t dw_spi_irq(int irq, void *dev_id) { struct dw_spi *dws = dev_id; + u16 irq_status, irq_mask = 0x3f; + + irq_status = dw_readw(dws, isr) & irq_mask; + if (!irq_status) + return IRQ_NONE; if (!dws->cur_msg) { spi_mask_intr(dws, SPI_INT_TXEI); @@ -544,13 +545,13 @@ static void pump_transfers(unsigned long data) */ if (dws->cs_control) { if (dws->rx && dws->tx) - chip->tmode = 0x00; + chip->tmode = SPI_TMOD_TR; else if (dws->rx) - chip->tmode = 0x02; + chip->tmode = SPI_TMOD_RO; else - chip->tmode = 0x01; + chip->tmode = SPI_TMOD_TO; - cr0 &= ~(0x3 << SPI_MODE_OFFSET); + cr0 &= ~SPI_TMOD_MASK; cr0 |= (chip->tmode << SPI_TMOD_OFFSET); } @@ -699,9 +700,6 @@ static int dw_spi_setup(struct spi_device *spi) chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) return -ENOMEM; - - chip->cs_control = null_cs_control; - chip->enable_dma = 0; } /* @@ -883,7 +881,7 @@ int __devinit dw_spi_add_host(struct dw_spi *dws) dws->dma_inited = 0; dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); - ret = request_irq(dws->irq, dw_spi_irq, 0, + ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, "dw_spi", dws); if (ret < 0) { dev_err(&master->dev, "can not get IRQ\n"); diff --git a/drivers/spi/spi.c 
b/drivers/spi/spi.c index a9e5c79ae52..0bcf4c1601a 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -554,11 +554,9 @@ done: EXPORT_SYMBOL_GPL(spi_register_master); -static int __unregister(struct device *dev, void *master_dev) +static int __unregister(struct device *dev, void *null) { - /* note: before about 2.6.14-rc1 this would corrupt memory: */ - if (dev != master_dev) - spi_unregister_device(to_spi_device(dev)); + spi_unregister_device(to_spi_device(dev)); return 0; } @@ -576,8 +574,7 @@ void spi_unregister_master(struct spi_master *master) { int dummy; - dummy = device_for_each_child(master->dev.parent, &master->dev, - __unregister); + dummy = device_for_each_child(&master->dev, NULL, __unregister); device_unregister(&master->dev); } EXPORT_SYMBOL_GPL(spi_unregister_master); diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c index 97365815a72..c3038da2648 100644 --- a/drivers/spi/spi_s3c64xx.c +++ b/drivers/spi/spi_s3c64xx.c @@ -200,6 +200,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) val = readl(regs + S3C64XX_SPI_STATUS); } while (TX_FIFO_LVL(val, sci) && loops--); + if (loops == 0) + dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n"); + /* Flush RxFIFO*/ loops = msecs_to_loops(1); do { @@ -210,6 +213,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) break; } while (loops--); + if (loops == 0) + dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n"); + val = readl(regs + S3C64XX_SPI_CH_CFG); val &= ~S3C64XX_SPI_CH_SW_RST; writel(val, regs + S3C64XX_SPI_CH_CFG); @@ -320,16 +326,17 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, /* millisecs to xfer 'len' bytes @ 'cur_speed' */ ms = xfer->len * 8 * 1000 / sdd->cur_speed; - ms += 5; /* some tolerance */ + ms += 10; /* some tolerance */ if (dma_mode) { val = msecs_to_jiffies(ms) + 10; val = wait_for_completion_timeout(&sdd->xfer_completion, val); } else { + u32 status; val = msecs_to_loops(ms); do { - val = readl(regs + S3C64XX_SPI_STATUS); - } while (RX_FIFO_LVL(val, sci) < xfer->len && --val); + status = readl(regs + S3C64XX_SPI_STATUS); + } while (RX_FIFO_LVL(status, sci) < xfer->len && --val); } if (!val) @@ -447,8 +454,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) writel(val, regs + S3C64XX_SPI_CLK_CFG); } -void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, - int size, enum s3c2410_dma_buffresult res) +static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, + int size, enum s3c2410_dma_buffresult res) { struct s3c64xx_spi_driver_data *sdd = buf_id; unsigned long flags; @@ -467,8 +474,8 @@ void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, spin_unlock_irqrestore(&sdd->lock, flags); } -void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, - int size, enum s3c2410_dma_buffresult res) +static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, + int size, enum s3c2410_dma_buffresult res) { struct s3c64xx_spi_driver_data *sdd = buf_id; unsigned long flags; @@ -508,8 +515,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, list_for_each_entry(xfer, &msg->transfers, transfer_list) { if (xfer->tx_buf != NULL) { - xfer->tx_dma = dma_map_single(dev, xfer->tx_buf, - xfer->len, DMA_TO_DEVICE); + xfer->tx_dma = dma_map_single(dev, + (void *)xfer->tx_buf, xfer->len, + DMA_TO_DEVICE); if (dma_mapping_error(dev, xfer->tx_dma)) { dev_err(dev, "dma_map_single Tx failed\n"); xfer->tx_dma = XFER_DMAADDR_INVALID; @@ -919,6 
+927,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) return -ENODEV; } + sci = pdev->dev.platform_data; + if (!sci->src_clk_name) { + dev_err(&pdev->dev, + "Board init must call s3c64xx_spi_set_info()\n"); + return -EINVAL; + } + /* Check for availability of necessary resource */ dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); @@ -946,8 +961,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) return -ENOMEM; } - sci = pdev->dev.platform_data; - platform_set_drvdata(pdev, master); sdd = spi_master_get_devdata(master); @@ -1170,7 +1183,7 @@ static int __init s3c64xx_spi_init(void) { return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe); } -module_init(s3c64xx_spi_init); +subsys_initcall(s3c64xx_spi_init); static void __exit s3c64xx_spi_exit(void) { diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 4b99117f3ec..c579dcc9200 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync, return 0; } +static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn) +{ + INIT_LIST_HEAD(&work->node); + work->fn = fn; + init_waitqueue_head(&work->done); + work->flushing = 0; + work->queue_seq = work->done_seq = 0; +} + /* Init poll structure */ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, unsigned long mask, struct vhost_dev *dev) { - struct vhost_work *work = &poll->work; - init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); init_poll_funcptr(&poll->table, vhost_poll_func); poll->mask = mask; poll->dev = dev; - INIT_LIST_HEAD(&work->node); - work->fn = fn; - init_waitqueue_head(&work->done); - work->flushing = 0; - work->queue_seq = work->done_seq = 0; + vhost_work_init(&poll->work, fn); } /* Start polling a file. We add ourselves to file's wait queue. The caller must @@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll) remove_wait_queue(poll->wqh, &poll->wait); } -/* Flush any work that has been scheduled. When calling this, don't hold any - * locks that are also used by the callback. */ -void vhost_poll_flush(struct vhost_poll *poll) +static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) { - struct vhost_work *work = &poll->work; unsigned seq; int left; int flushing; - spin_lock_irq(&poll->dev->work_lock); + spin_lock_irq(&dev->work_lock); seq = work->queue_seq; work->flushing++; - spin_unlock_irq(&poll->dev->work_lock); + spin_unlock_irq(&dev->work_lock); wait_event(work->done, ({ - spin_lock_irq(&poll->dev->work_lock); + spin_lock_irq(&dev->work_lock); left = seq - work->done_seq <= 0; - spin_unlock_irq(&poll->dev->work_lock); + spin_unlock_irq(&dev->work_lock); left; })); - spin_lock_irq(&poll->dev->work_lock); + spin_lock_irq(&dev->work_lock); flushing = --work->flushing; - spin_unlock_irq(&poll->dev->work_lock); + spin_unlock_irq(&dev->work_lock); BUG_ON(flushing < 0); } -void vhost_poll_queue(struct vhost_poll *poll) +/* Flush any work that has been scheduled. When calling this, don't hold any + * locks that are also used by the callback. 
*/ +void vhost_poll_flush(struct vhost_poll *poll) +{ + vhost_work_flush(poll->dev, &poll->work); +} + +static inline void vhost_work_queue(struct vhost_dev *dev, + struct vhost_work *work) { - struct vhost_dev *dev = poll->dev; - struct vhost_work *work = &poll->work; unsigned long flags; spin_lock_irqsave(&dev->work_lock, flags); @@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll) spin_unlock_irqrestore(&dev->work_lock, flags); } +void vhost_poll_queue(struct vhost_poll *poll) +{ + vhost_work_queue(poll->dev, &poll->work); +} + static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { @@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev) return dev->mm == current->mm ? 0 : -EPERM; } +struct vhost_attach_cgroups_struct { + struct vhost_work work; + struct task_struct *owner; + int ret; +}; + +static void vhost_attach_cgroups_work(struct vhost_work *work) +{ + struct vhost_attach_cgroups_struct *s; + s = container_of(work, struct vhost_attach_cgroups_struct, work); + s->ret = cgroup_attach_task_all(s->owner, current); +} + +static int vhost_attach_cgroups(struct vhost_dev *dev) +{ + struct vhost_attach_cgroups_struct attach; + attach.owner = current; + vhost_work_init(&attach.work, vhost_attach_cgroups_work); + vhost_work_queue(dev, &attach.work); + vhost_work_flush(dev, &attach.work); + return attach.ret; +} + /* Caller should have device mutex */ static long vhost_dev_set_owner(struct vhost_dev *dev) { @@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev) } dev->worker = worker; - err = cgroup_attach_task_current_cg(worker); + wake_up_process(worker); /* avoid contributing to loadavg */ + + err = vhost_attach_cgroups(dev); if (err) goto err_cgroup; - wake_up_process(worker); /* avoid contributing to loadavg */ return 0; err_cgroup: kthread_stop(worker); + dev->worker = NULL; err_worker: if (dev->mm) mmput(dev->mm); diff --git a/drivers/video/via/ioctl.c b/drivers/video/via/ioctl.c index da03c074e32..4d553d0b8d7 100644 --- a/drivers/video/via/ioctl.c +++ b/drivers/video/via/ioctl.c @@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long arg) { struct viafb_ioctl_info viainfo; + memset(&viainfo, 0, sizeof(struct viafb_ioctl_info)); + viainfo.viafb_id = VIAID; viainfo.vendor_id = PCI_VIA_VENDOR_ID; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index b036677df8c..24efd8ea41b 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -213,11 +213,11 @@ config OMAP_WATCHDOG here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer. config PNX4008_WATCHDOG - tristate "PNX4008 Watchdog" - depends on ARCH_PNX4008 + tristate "PNX4008 and LPC32XX Watchdog" + depends on ARCH_PNX4008 || ARCH_LPC32XX help Say Y here if to include support for the watchdog timer - in the PNX4008 processor. + in the PNX4008 or LPC32XX processor. This driver can be built as a module by choosing M. The module will be called pnx4008_wdt. 
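
As a hedged aside on the drivers/video/via/ioctl.c hunk above (which adds a memset() before the ioctl reply is populated): the general pattern is to zero a stack-allocated reply struct before filling it and copying it to user space, so padding bytes and unwritten fields cannot leak kernel stack contents. The sketch below illustrates only that pattern; struct foo_info, FOO_ID and foo_ioctl_get_info are hypothetical names, not part of the viafb driver.

/*
 * Minimal sketch: clear the whole struct first, then fill in the
 * fields we mean to report, then copy it out.
 */
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#define FOO_ID 0x1106                    /* illustrative identifier */

struct foo_info {
        u32 id;
        u32 vendor;
        u8  reserved[8];                 /* stale stack bytes if left uninitialized */
};

static long foo_ioctl_get_info(void __user *argp)
{
        struct foo_info info;

        memset(&info, 0, sizeof(info));  /* clears padding and reserved fields */
        info.id = FOO_ID;
        info.vendor = FOO_ID;

        if (copy_to_user(argp, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}
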
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c index 88c83aa5730..f31493e65b3 100644 --- a/drivers/watchdog/sb_wdog.c +++ b/drivers/watchdog/sb_wdog.c @@ -305,7 +305,7 @@ static int __init sbwdog_init(void) if (ret) { printk(KERN_ERR "%s: failed to request irq 1 - %d\n", ident.identity, ret); - return ret; + goto out; } ret = misc_register(&sbwdog_miscdev); @@ -313,14 +313,20 @@ static int __init sbwdog_init(void) printk(KERN_INFO "%s: timeout is %ld.%ld secs\n", ident.identity, timeout / 1000000, (timeout / 100000) % 10); - } else - free_irq(1, (void *)user_dog); + return 0; + } + free_irq(1, (void *)user_dog); +out: + unregister_reboot_notifier(&sbwdog_notifier); + return ret; } static void __exit sbwdog_exit(void) { misc_deregister(&sbwdog_miscdev); + free_irq(1, (void *)user_dog); + unregister_reboot_notifier(&sbwdog_notifier); } module_init(sbwdog_init); diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c index 458c499c122..18cdeb4c425 100644 --- a/drivers/watchdog/ts72xx_wdt.c +++ b/drivers/watchdog/ts72xx_wdt.c @@ -449,6 +449,9 @@ static __devinit int ts72xx_wdt_probe(struct platform_device *pdev) wdt->pdev = pdev; mutex_init(&wdt->lock); + /* make sure that the watchdog is disabled */ + ts72xx_wdt_stop(wdt); + error = misc_register(&ts72xx_wdt_miscdev); if (error) { dev_err(&pdev->dev, "failed to register miscdev\n"); diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index 16c8a2a98c1..899f168fd19 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -292,9 +292,11 @@ int v9fs_dir_release(struct inode *inode, struct file *filp) fid = filp->private_data; P9_DPRINTK(P9_DEBUG_VFS, - "inode: %p filp: %p fid: %d\n", inode, filp, fid->fid); + "v9fs_dir_release: inode: %p filp: %p fid: %d\n", + inode, filp, fid ? 
fid->fid : -1); filemap_write_and_wait(inode->i_mapping); - p9_client_clunk(fid); + if (fid) + p9_client_clunk(fid); return 0; } diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index c7c23eab944..9e670d52764 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -730,7 +730,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode, P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); goto error; } - dentry->d_op = &v9fs_cached_dentry_operations; + if (v9ses->cache) + dentry->d_op = &v9fs_cached_dentry_operations; + else + dentry->d_op = &v9fs_dentry_operations; d_instantiate(dentry, inode); err = v9fs_fid_add(dentry, fid); if (err < 0) @@ -1128,6 +1131,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); generic_fillattr(dentry->d_inode, stat); + p9stat_free(st); kfree(st); return 0; } @@ -1489,6 +1493,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen) retval = strnlen(buffer, buflen); done: + p9stat_free(st); kfree(st); return retval; } @@ -1942,7 +1947,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = { .unlink = v9fs_vfs_unlink, .mkdir = v9fs_vfs_mkdir, .rmdir = v9fs_vfs_rmdir, - .mknod = v9fs_vfs_mknod_dotl, + .mknod = v9fs_vfs_mknod, .rename = v9fs_vfs_rename, .getattr = v9fs_vfs_getattr, .setattr = v9fs_vfs_setattr, diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index f9311077de6..1d12ba0ed3d 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -122,6 +122,10 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, fid = v9fs_session_init(v9ses, dev_name, data); if (IS_ERR(fid)) { retval = PTR_ERR(fid); + /* + * we need to call session_close to tear down some + * of the data structure setup by session_init + */ goto close_session; } @@ -144,7 +148,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, retval = -ENOMEM; goto release_sb; } - sb->s_root = root; if (v9fs_proto_dotl(v9ses)) { @@ -152,7 +155,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); if (IS_ERR(st)) { retval = PTR_ERR(st); - goto clunk_fid; + goto release_sb; } v9fs_stat2inode_dotl(st, root->d_inode); @@ -162,7 +165,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, st = p9_client_stat(fid); if (IS_ERR(st)) { retval = PTR_ERR(st); - goto clunk_fid; + goto release_sb; } root->d_inode->i_ino = v9fs_qid2ino(&st->qid); @@ -174,19 +177,24 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, v9fs_fid_add(root, fid); -P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); + P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); simple_set_mnt(mnt, sb); return 0; clunk_fid: p9_client_clunk(fid); - close_session: v9fs_session_close(v9ses); kfree(v9ses); return retval; - release_sb: + /* + * we will do the session_close and root dentry release + * in the below call. But we need to clunk fid, because we haven't + * attached the fid to dentry so it won't get clunked + * automatically. 
+ */ + p9_client_clunk(fid); deactivate_locked_super(sb); return retval; } @@ -1659,6 +1659,9 @@ long do_io_submit(aio_context_t ctx_id, long nr, if (unlikely(nr < 0)) return -EINVAL; + if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) + nr = LONG_MAX/sizeof(*iocbpp); + if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) return -EFAULT; diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 612a5c38d3c..4d0ff5ee27b 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -413,10 +413,10 @@ int bio_integrity_prep(struct bio *bio) /* Allocate kernel buffer for protection data */ len = sectors * blk_integrity_tuple_size(bi); - buf = kmalloc(len, GFP_NOIO | __GFP_NOFAIL | q->bounce_gfp); + buf = kmalloc(len, GFP_NOIO | q->bounce_gfp); if (unlikely(buf == NULL)) { printk(KERN_ERR "could not allocate integrity buffer\n"); - return -EIO; + return -ENOMEM; } end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 0da1debd499..917b7d449bb 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig @@ -2,8 +2,6 @@ config CIFS tristate "CIFS support (advanced network filesystem, SMBFS successor)" depends on INET select NLS - select CRYPTO_MD5 - select CRYPTO_ARC4 help This is the client VFS module for the Common Internet File System (CIFS) protocol which is the successor to the Server Message Block diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c index 21f0fbd8698..cfd1ce34e0b 100644 --- a/fs/cifs/asn1.c +++ b/fs/cifs/asn1.c @@ -597,13 +597,13 @@ decode_negTokenInit(unsigned char *security_blob, int length, if (compare_oid(oid, oidlen, MSKRB5_OID, MSKRB5_OID_LEN)) server->sec_mskerberos = true; - if (compare_oid(oid, oidlen, KRB5U2U_OID, + else if (compare_oid(oid, oidlen, KRB5U2U_OID, KRB5U2U_OID_LEN)) server->sec_kerberosu2u = true; - if (compare_oid(oid, oidlen, KRB5_OID, + else if (compare_oid(oid, oidlen, KRB5_OID, KRB5_OID_LEN)) server->sec_kerberos = true; - if (compare_oid(oid, oidlen, NTLMSSP_OID, + else if (compare_oid(oid, oidlen, NTLMSSP_OID, NTLMSSP_OID_LEN)) server->sec_ntlmssp = true; diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 709f2296bdb..35042d8f733 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -27,7 +27,6 @@ #include "md5.h" #include "cifs_unicode.h" #include "cifsproto.h" -#include "ntlmssp.h" #include <linux/ctype.h> #include <linux/random.h> @@ -43,43 +42,21 @@ extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8, unsigned char *p24); static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, - struct TCP_Server_Info *server, char *signature) + const struct mac_key *key, char *signature) { - int rc; + struct MD5Context context; - if (cifs_pdu == NULL || server == NULL || signature == NULL) + if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL)) return -EINVAL; - if (!server->ntlmssp.sdescmd5) { - cERROR(1, - "cifs_calculate_signature: can't generate signature\n"); - return -1; - } - - rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash); - if (rc) { - cERROR(1, "cifs_calculate_signature: oould not init md5\n"); - return rc; - } - - if (server->secType == RawNTLMSSP) - crypto_shash_update(&server->ntlmssp.sdescmd5->shash, - server->session_key.data.ntlmv2.key, - CIFS_NTLMV2_SESSKEY_SIZE); - else - crypto_shash_update(&server->ntlmssp.sdescmd5->shash, - (char *)&server->session_key.data, - server->session_key.len); - - crypto_shash_update(&server->ntlmssp.sdescmd5->shash, - cifs_pdu->Protocol, cifs_pdu->smb_buf_length); + 
cifs_MD5_init(&context); + cifs_MD5_update(&context, (char *)&key->data, key->len); + cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length); - rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature); - - return rc; + cifs_MD5_final(signature, &context); + return 0; } - int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, __u32 *pexpected_response_sequence_number) { @@ -101,7 +78,8 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, server->sequence_number++; spin_unlock(&GlobalMid_Lock); - rc = cifs_calculate_signature(cifs_pdu, server, smb_signature); + rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key, + smb_signature); if (rc) memset(cifs_pdu->Signature.SecuritySignature, 0, 8); else @@ -111,39 +89,21 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, } static int cifs_calc_signature2(const struct kvec *iov, int n_vec, - struct TCP_Server_Info *server, char *signature) + const struct mac_key *key, char *signature) { + struct MD5Context context; int i; - int rc; - if (iov == NULL || server == NULL || signature == NULL) + if ((iov == NULL) || (signature == NULL) || (key == NULL)) return -EINVAL; - if (!server->ntlmssp.sdescmd5) { - cERROR(1, "cifs_calc_signature2: can't generate signature\n"); - return -1; - } - - rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash); - if (rc) { - cERROR(1, "cifs_calc_signature2: oould not init md5\n"); - return rc; - } - - if (server->secType == RawNTLMSSP) - crypto_shash_update(&server->ntlmssp.sdescmd5->shash, - server->session_key.data.ntlmv2.key, - CIFS_NTLMV2_SESSKEY_SIZE); - else - crypto_shash_update(&server->ntlmssp.sdescmd5->shash, - (char *)&server->session_key.data, - server->session_key.len); - + cifs_MD5_init(&context); + cifs_MD5_update(&context, (char *)&key->data, key->len); for (i = 0; i < n_vec; i++) { if (iov[i].iov_len == 0) continue; if (iov[i].iov_base == NULL) { - cERROR(1, "cifs_calc_signature2: null iovec entry"); + cERROR(1, "null iovec entry"); return -EIO; } /* The first entry includes a length field (which does not get @@ -151,18 +111,18 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec, if (i == 0) { if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ break; /* nothing to sign or corrupt header */ - crypto_shash_update(&server->ntlmssp.sdescmd5->shash, - iov[i].iov_base + 4, iov[i].iov_len - 4); + cifs_MD5_update(&context, iov[0].iov_base+4, + iov[0].iov_len-4); } else - crypto_shash_update(&server->ntlmssp.sdescmd5->shash, - iov[i].iov_base, iov[i].iov_len); + cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len); } - rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature); + cifs_MD5_final(signature, &context); - return rc; + return 0; } + int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, __u32 *pexpected_response_sequence_number) { @@ -185,7 +145,8 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, server->sequence_number++; spin_unlock(&GlobalMid_Lock); - rc = cifs_calc_signature2(iov, n_vec, server, smb_signature); + rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key, + smb_signature); if (rc) memset(cifs_pdu->Signature.SecuritySignature, 0, 8); else @@ -195,14 +156,14 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, } int cifs_verify_signature(struct smb_hdr *cifs_pdu, - struct TCP_Server_Info *server, + const struct mac_key *mac_key, __u32 
expected_sequence_number) { - int rc; + unsigned int rc; char server_response_sig[8]; char what_we_think_sig_should_be[20]; - if (cifs_pdu == NULL || server == NULL) + if ((cifs_pdu == NULL) || (mac_key == NULL)) return -EINVAL; if (cifs_pdu->Command == SMB_COM_NEGOTIATE) @@ -231,7 +192,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, cpu_to_le32(expected_sequence_number); cifs_pdu->Signature.Sequence.Reserved = 0; - rc = cifs_calculate_signature(cifs_pdu, server, + rc = cifs_calculate_signature(cifs_pdu, mac_key, what_we_think_sig_should_be); if (rc) @@ -248,7 +209,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, } /* We fill in key by putting in 40 byte array which was allocated by caller */ -int cifs_calculate_session_key(struct session_key *key, const char *rn, +int cifs_calculate_mac_key(struct mac_key *key, const char *rn, const char *password) { char temp_key[16]; @@ -306,52 +267,38 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, { int rc = 0; int len; - char nt_hash[CIFS_NTHASH_SIZE]; + char nt_hash[16]; + struct HMACMD5Context *pctxt; wchar_t *user; wchar_t *domain; - wchar_t *server; - if (!ses->server->ntlmssp.sdeschmacmd5) { - cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n"); - return -1; - } + pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL); + + if (pctxt == NULL) + return -ENOMEM; /* calculate md4 hash of password */ E_md4hash(ses->password, nt_hash); - crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, nt_hash, - CIFS_NTHASH_SIZE); - - rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash); - if (rc) { - cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n"); - return rc; - } + /* convert Domainname to unicode and uppercase */ + hmac_md5_init_limK_to_64(nt_hash, 16, pctxt); /* convert ses->userName to unicode and uppercase */ len = strlen(ses->userName); user = kmalloc(2 + (len * 2), GFP_KERNEL); - if (user == NULL) { - cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n"); - rc = -ENOMEM; + if (user == NULL) goto calc_exit_2; - } len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp); UniStrupr(user); - - crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, - (char *)user, 2 * len); + hmac_md5_update((char *)user, 2*len, pctxt); /* convert ses->domainName to unicode and uppercase */ if (ses->domainName) { len = strlen(ses->domainName); domain = kmalloc(2 + (len * 2), GFP_KERNEL); - if (domain == NULL) { - cERROR(1, "calc_ntlmv2_hash: domain mem alloc failure"); - rc = -ENOMEM; + if (domain == NULL) goto calc_exit_1; - } len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len, nls_cp); /* the following line was removed since it didn't work well @@ -359,292 +306,65 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, Maybe converting the domain name earlier makes sense */ /* UniStrupr(domain); */ - crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, - (char *)domain, 2 * len); + hmac_md5_update((char *)domain, 2*len, pctxt); kfree(domain); - } else if (ses->serverName) { - len = strlen(ses->serverName); - - server = kmalloc(2 + (len * 2), GFP_KERNEL); - if (server == NULL) { - cERROR(1, "calc_ntlmv2_hash: server mem alloc failure"); - rc = -ENOMEM; - goto calc_exit_1; - } - len = cifs_strtoUCS((__le16 *)server, ses->serverName, len, - nls_cp); - /* the following line was removed since it didn't work well - with lower cased domain name that passed as an option. 
- Maybe converting the domain name earlier makes sense */ - /* UniStrupr(domain); */ - - crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, - (char *)server, 2 * len); - - kfree(server); } - - rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash, - ses->server->ntlmv2_hash); - calc_exit_1: kfree(user); calc_exit_2: /* BB FIXME what about bytes 24 through 40 of the signing key? compare with the NTLM example */ + hmac_md5_final(ses->server->ntlmv2_hash, pctxt); + kfree(pctxt); return rc; } -static int -find_domain_name(struct cifsSesInfo *ses) -{ - int rc = 0; - unsigned int attrsize; - unsigned int type; - unsigned char *blobptr; - struct ntlmssp2_name *attrptr; - - if (ses->server->tiblob) { - blobptr = ses->server->tiblob; - attrptr = (struct ntlmssp2_name *) blobptr; - - while ((type = attrptr->type) != 0) { - blobptr += 2; /* advance attr type */ - attrsize = attrptr->length; - blobptr += 2; /* advance attr size */ - if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { - if (!ses->domainName) { - ses->domainName = - kmalloc(attrptr->length + 1, - GFP_KERNEL); - if (!ses->domainName) - return -ENOMEM; - cifs_from_ucs2(ses->domainName, - (__le16 *)blobptr, - attrptr->length, - attrptr->length, - load_nls_default(), false); - } - } - blobptr += attrsize; /* advance attr value */ - attrptr = (struct ntlmssp2_name *) blobptr; - } - } else { - ses->server->tilen = 2 * sizeof(struct ntlmssp2_name); - ses->server->tiblob = kmalloc(ses->server->tilen, GFP_KERNEL); - if (!ses->server->tiblob) { - ses->server->tilen = 0; - cERROR(1, "Challenge target info allocation failure"); - return -ENOMEM; - } - memset(ses->server->tiblob, 0x0, ses->server->tilen); - attrptr = (struct ntlmssp2_name *) ses->server->tiblob; - attrptr->type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE); - } - - return rc; -} - -static int -CalcNTLMv2_response(const struct TCP_Server_Info *server, - char *v2_session_response) -{ - int rc; - - if (!server->ntlmssp.sdeschmacmd5) { - cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n"); - return -1; - } - - crypto_shash_setkey(server->ntlmssp.hmacmd5, server->ntlmv2_hash, - CIFS_HMAC_MD5_HASH_SIZE); - - rc = crypto_shash_init(&server->ntlmssp.sdeschmacmd5->shash); - if (rc) { - cERROR(1, "CalcNTLMv2_response: could not init hmacmd5"); - return rc; - } - - memcpy(v2_session_response + CIFS_SERVER_CHALLENGE_SIZE, - server->cryptKey, CIFS_SERVER_CHALLENGE_SIZE); - crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash, - v2_session_response + CIFS_SERVER_CHALLENGE_SIZE, - sizeof(struct ntlmv2_resp) - CIFS_SERVER_CHALLENGE_SIZE); - - if (server->tilen) - crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash, - server->tiblob, server->tilen); - - rc = crypto_shash_final(&server->ntlmssp.sdeschmacmd5->shash, - v2_session_response); - - return rc; -} - -int -setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf, +void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf, const struct nls_table *nls_cp) { - int rc = 0; + int rc; struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf; + struct HMACMD5Context context; buf->blob_signature = cpu_to_le32(0x00000101); buf->reserved = 0; buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); get_random_bytes(&buf->client_chal, sizeof(buf->client_chal)); buf->reserved2 = 0; - - if (!ses->domainName) { - rc = find_domain_name(ses); - if (rc) { - cERROR(1, "could not get domain/server name rc %d", rc); - return rc; - } - } + buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE); + buf->names[0].length = 0; + 
buf->names[1].type = 0; + buf->names[1].length = 0; /* calculate buf->ntlmv2_hash */ rc = calc_ntlmv2_hash(ses, nls_cp); - if (rc) { - cERROR(1, "could not get v2 hash rc %d", rc); - return rc; - } - rc = CalcNTLMv2_response(ses->server, resp_buf); - if (rc) { + if (rc) cERROR(1, "could not get v2 hash rc %d", rc); - return rc; - } - - if (!ses->server->ntlmssp.sdeschmacmd5) { - cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n"); - return -1; - } - - crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, - ses->server->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE); + CalcNTLMv2_response(ses, resp_buf); - rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash); - if (rc) { - cERROR(1, "setup_ntlmv2_rsp: could not init hmacmd5\n"); - return rc; - } + /* now calculate the MAC key for NTLMv2 */ + hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context); + hmac_md5_update(resp_buf, 16, &context); + hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context); - crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, - resp_buf, CIFS_HMAC_MD5_HASH_SIZE); - - rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash, - ses->server->session_key.data.ntlmv2.key); - - memcpy(&ses->server->session_key.data.ntlmv2.resp, resp_buf, - sizeof(struct ntlmv2_resp)); - ses->server->session_key.len = 16 + sizeof(struct ntlmv2_resp); - - return rc; + memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf, + sizeof(struct ntlmv2_resp)); + ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp); } -int -calc_seckey(struct TCP_Server_Info *server) -{ - int rc; - unsigned char sec_key[CIFS_NTLMV2_SESSKEY_SIZE]; - struct crypto_blkcipher *tfm_arc4; - struct scatterlist sgin, sgout; - struct blkcipher_desc desc; - - get_random_bytes(sec_key, CIFS_NTLMV2_SESSKEY_SIZE); - - tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", - 0, CRYPTO_ALG_ASYNC); - if (!tfm_arc4 || IS_ERR(tfm_arc4)) { - cERROR(1, "could not allocate " "master crypto API arc4\n"); - return 1; - } - - desc.tfm = tfm_arc4; - - crypto_blkcipher_setkey(tfm_arc4, - server->session_key.data.ntlmv2.key, CIFS_CPHTXT_SIZE); - sg_init_one(&sgin, sec_key, CIFS_CPHTXT_SIZE); - sg_init_one(&sgout, server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE); - rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE); - - if (!rc) - memcpy(server->session_key.data.ntlmv2.key, - sec_key, CIFS_NTLMV2_SESSKEY_SIZE); - - crypto_free_blkcipher(tfm_arc4); - - return 0; -} - -void -cifs_crypto_shash_release(struct TCP_Server_Info *server) -{ - if (server->ntlmssp.md5) - crypto_free_shash(server->ntlmssp.md5); - - if (server->ntlmssp.hmacmd5) - crypto_free_shash(server->ntlmssp.hmacmd5); - - kfree(server->ntlmssp.sdeschmacmd5); - - kfree(server->ntlmssp.sdescmd5); -} - -int -cifs_crypto_shash_allocate(struct TCP_Server_Info *server) +void CalcNTLMv2_response(const struct cifsSesInfo *ses, + char *v2_session_response) { - int rc; - unsigned int size; - - server->ntlmssp.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); - if (!server->ntlmssp.hmacmd5 || - IS_ERR(server->ntlmssp.hmacmd5)) { - cERROR(1, "could not allocate crypto hmacmd5\n"); - return 1; - } - - server->ntlmssp.md5 = crypto_alloc_shash("md5", 0, 0); - if (!server->ntlmssp.md5 || IS_ERR(server->ntlmssp.md5)) { - cERROR(1, "could not allocate crypto md5\n"); - rc = 1; - goto cifs_crypto_shash_allocate_ret1; - } - - size = sizeof(struct shash_desc) + - crypto_shash_descsize(server->ntlmssp.hmacmd5); - server->ntlmssp.sdeschmacmd5 = kmalloc(size, GFP_KERNEL); - 
if (!server->ntlmssp.sdeschmacmd5) { - cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n"); - rc = -ENOMEM; - goto cifs_crypto_shash_allocate_ret2; - } - server->ntlmssp.sdeschmacmd5->shash.tfm = server->ntlmssp.hmacmd5; - server->ntlmssp.sdeschmacmd5->shash.flags = 0x0; + struct HMACMD5Context context; + /* rest of v2 struct already generated */ + memcpy(v2_session_response + 8, ses->server->cryptKey, 8); + hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context); + hmac_md5_update(v2_session_response+8, + sizeof(struct ntlmv2_resp) - 8, &context); - size = sizeof(struct shash_desc) + - crypto_shash_descsize(server->ntlmssp.md5); - server->ntlmssp.sdescmd5 = kmalloc(size, GFP_KERNEL); - if (!server->ntlmssp.sdescmd5) { - cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n"); - rc = -ENOMEM; - goto cifs_crypto_shash_allocate_ret3; - } - server->ntlmssp.sdescmd5->shash.tfm = server->ntlmssp.md5; - server->ntlmssp.sdescmd5->shash.flags = 0x0; - - return 0; - -cifs_crypto_shash_allocate_ret3: - kfree(server->ntlmssp.sdeschmacmd5); - -cifs_crypto_shash_allocate_ret2: - crypto_free_shash(server->ntlmssp.md5); - -cifs_crypto_shash_allocate_ret1: - crypto_free_shash(server->ntlmssp.hmacmd5); - - return rc; + hmac_md5_final(v2_session_response, &context); +/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */ } diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index c9d0cfc086e..0cdfb8c32ac 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -25,9 +25,6 @@ #include <linux/workqueue.h> #include "cifs_fs_sb.h" #include "cifsacl.h" -#include <crypto/internal/hash.h> -#include <linux/scatterlist.h> - /* * The sizes of various internal tables and strings */ @@ -100,7 +97,7 @@ enum protocolEnum { /* Netbios frames protocol not supported at this time */ }; -struct session_key { +struct mac_key { unsigned int len; union { char ntlm[CIFS_SESS_KEY_SIZE + 16]; @@ -123,21 +120,6 @@ struct cifs_cred { struct cifs_ace *aces; }; -struct sdesc { - struct shash_desc shash; - char ctx[]; -}; - -struct ntlmssp_auth { - __u32 client_flags; - __u32 server_flags; - unsigned char ciphertext[CIFS_CPHTXT_SIZE]; - struct crypto_shash *hmacmd5; - struct crypto_shash *md5; - struct sdesc *sdeschmacmd5; - struct sdesc *sdescmd5; -}; - /* ***************************************************************** * Except the CIFS PDUs themselves all the @@ -200,14 +182,11 @@ struct TCP_Server_Info { /* 16th byte of RFC1001 workstation name is always null */ char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; __u32 sequence_number; /* needed for CIFS PDU signature */ - struct session_key session_key; + struct mac_key mac_signing_key; char ntlmv2_hash[16]; unsigned long lstrp; /* when we got last response from this server */ u16 dialect; /* dialect index that server chose */ /* extended security flavors that server supports */ - unsigned int tilen; /* length of the target info blob */ - unsigned char *tiblob; /* target info blob in challenge response */ - struct ntlmssp_auth ntlmssp; /* various keys, ciphers, flags */ bool sec_kerberos; /* supports plain Kerberos */ bool sec_mskerberos; /* supports legacy MS Kerberos */ bool sec_kerberosu2u; /* supports U2U Kerberos */ diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index 320e0fd0ba7..14d036d8db1 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -134,12 +134,6 @@ * Size of the session key (crypto key encrypted with the password */ #define CIFS_SESS_KEY_SIZE (24) -#define CIFS_CLIENT_CHALLENGE_SIZE (8) -#define 
CIFS_SERVER_CHALLENGE_SIZE (8) -#define CIFS_HMAC_MD5_HASH_SIZE (16) -#define CIFS_CPHTXT_SIZE (16) -#define CIFS_NTLMV2_SESSKEY_SIZE (16) -#define CIFS_NTHASH_SIZE (16) /* * Maximum user name length @@ -669,6 +663,7 @@ struct ntlmv2_resp { __le64 time; __u64 client_chal; /* random */ __u32 reserved2; + struct ntlmssp2_name names[2]; /* array of name entries could follow ending in minimum 4 byte struct */ } __attribute__((packed)); diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 1378d913384..1d60c655e3e 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -87,8 +87,9 @@ extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); extern int decode_negTokenInit(unsigned char *security_blob, int length, struct TCP_Server_Info *server); extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len); +extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port); extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, - unsigned short int port); + const unsigned short int port); extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); extern void header_assemble(struct smb_hdr *, char /* command */ , const struct cifsTconInfo *, int /* length of @@ -361,15 +362,13 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *); extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, __u32 *); extern int cifs_verify_signature(struct smb_hdr *, - struct TCP_Server_Info *server, + const struct mac_key *mac_key, __u32 expected_sequence_number); -extern int cifs_calculate_session_key(struct session_key *key, const char *rn, +extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn, const char *pass); -extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *, +extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *); +extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *, const struct nls_table *); -extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *); -extern void cifs_crypto_shash_release(struct TCP_Server_Info *); -extern int calc_seckey(struct TCP_Server_Info *); #ifdef CONFIG_CIFS_WEAK_PW_HASH extern void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt, char *lnm_session_key); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 4bda920d1f7..c65c3419dd3 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -604,14 +604,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) else rc = -EINVAL; - if (server->secType == Kerberos) { - if (!server->sec_kerberos && - !server->sec_mskerberos) - rc = -EOPNOTSUPP; - } else if (server->secType == RawNTLMSSP) { - if (!server->sec_ntlmssp) - rc = -EOPNOTSUPP; - } else + if (server->sec_kerberos || server->sec_mskerberos) + server->secType = Kerberos; + else if (server->sec_ntlmssp) + server->secType = RawNTLMSSP; + else rc = -EOPNOTSUPP; } } else diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index ec0ea4a43bd..88c84a38bcc 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -400,7 +400,9 @@ incomplete_rcv: cFYI(1, "call to reconnect done"); csocket = server->ssocket; continue; - } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) { + } else if (length == -ERESTARTSYS || + length == -EAGAIN || + length == -EINTR) { msleep(1); /* minimum sleep to prevent looping allowing socket to clear and app threads to set tcpStatus CifsNeedReconnect if server hung */ @@ -414,18 +416,6 @@ incomplete_rcv: } else continue; } else if (length <= 0) { - 
if (server->tcpStatus == CifsNew) { - cFYI(1, "tcp session abend after SMBnegprot"); - /* some servers kill the TCP session rather than - returning an SMB negprot error, in which - case reconnecting here is not going to help, - and so simply return error to mount */ - break; - } - if (!try_to_freeze() && (length == -EINTR)) { - cFYI(1, "cifsd thread killed"); - break; - } cFYI(1, "Reconnect after unexpected peek error %d", length); cifs_reconnect(server); @@ -466,27 +456,19 @@ incomplete_rcv: an error on SMB negprot response */ cFYI(1, "Negative RFC1002 Session Response Error 0x%x)", pdu_length); - if (server->tcpStatus == CifsNew) { - /* if nack on negprot (rather than - ret of smb negprot error) reconnecting - not going to help, ret error to mount */ - break; - } else { - /* give server a second to - clean up before reconnect attempt */ - msleep(1000); - /* always try 445 first on reconnect - since we get NACK on some if we ever - connected to port 139 (the NACK is - since we do not begin with RFC1001 - session initialize frame) */ - server->addr.sockAddr.sin_port = - htons(CIFS_PORT); - cifs_reconnect(server); - csocket = server->ssocket; - wake_up(&server->response_q); - continue; - } + /* give server a second to clean up */ + msleep(1000); + /* always try 445 first on reconnect since we get NACK + * on some if we ever connected to port 139 (the NACK + * is since we do not begin with RFC1001 session + * initialize frame) + */ + cifs_set_port((struct sockaddr *) + &server->addr.sockAddr, CIFS_PORT); + cifs_reconnect(server); + csocket = server->ssocket; + wake_up(&server->response_q); + continue; } else if (temp != (char) 0) { cERROR(1, "Unknown RFC 1002 frame"); cifs_dump_mem(" Received Data: ", (char *)smb_buffer, @@ -522,8 +504,7 @@ incomplete_rcv: total_read += length) { length = kernel_recvmsg(csocket, &smb_msg, &iov, 1, pdu_length - total_read, 0); - if ((server->tcpStatus == CifsExiting) || - (length == -EINTR)) { + if (server->tcpStatus == CifsExiting) { /* then will exit */ reconnect = 2; break; @@ -534,8 +515,9 @@ incomplete_rcv: /* Now we will reread sock */ reconnect = 1; break; - } else if ((length == -ERESTARTSYS) || - (length == -EAGAIN)) { + } else if (length == -ERESTARTSYS || + length == -EAGAIN || + length == -EINTR) { msleep(1); /* minimum sleep to prevent looping, allowing socket to clear and app threads to set tcpStatus @@ -1708,7 +1690,6 @@ cifs_put_smb_ses(struct cifsSesInfo *ses) CIFSSMBLogoff(xid, ses); _FreeXid(xid); } - cifs_crypto_shash_release(server); sesInfoFree(ses); cifs_put_tcp_session(server); } @@ -1725,9 +1706,6 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) if (ses) { cFYI(1, "Existing smb sess found (status=%d)", ses->status); - /* existing SMB ses has a server reference already */ - cifs_put_tcp_session(server); - mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses); if (rc) { @@ -1750,6 +1728,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) } } mutex_unlock(&ses->session_mutex); + + /* existing SMB ses has a server reference already */ + cifs_put_tcp_session(server); FreeXid(xid); return ses; } @@ -1788,23 +1769,13 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) ses->linux_uid = volume_info->linux_uid; ses->overrideSecFlg = volume_info->secFlg; - rc = cifs_crypto_shash_allocate(server); - if (rc) { - cERROR(1, "could not setup hash structures rc %d", rc); - goto get_ses_fail; - } - server->tilen = 0; - server->tiblob = NULL; - 
mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses); if (!rc) rc = cifs_setup_session(xid, ses, volume_info->local_nls); mutex_unlock(&ses->session_mutex); - if (rc) { - cifs_crypto_shash_release(ses->server); + if (rc) goto get_ses_fail; - } /* success, put it on the list */ write_lock(&cifs_tcp_ses_lock); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 86a164f08a7..93f77d438d3 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1462,29 +1462,18 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry, { char *fromName = NULL; char *toName = NULL; - struct cifs_sb_info *cifs_sb_source; - struct cifs_sb_info *cifs_sb_target; + struct cifs_sb_info *cifs_sb; struct cifsTconInfo *tcon; FILE_UNIX_BASIC_INFO *info_buf_source = NULL; FILE_UNIX_BASIC_INFO *info_buf_target; int xid, rc, tmprc; - cifs_sb_target = CIFS_SB(target_dir->i_sb); - cifs_sb_source = CIFS_SB(source_dir->i_sb); - tcon = cifs_sb_source->tcon; + cifs_sb = CIFS_SB(source_dir->i_sb); + tcon = cifs_sb->tcon; xid = GetXid(); /* - * BB: this might be allowed if same server, but different share. - * Consider adding support for this - */ - if (tcon != cifs_sb_target->tcon) { - rc = -EXDEV; - goto cifs_rename_exit; - } - - /* * we already have the rename sem so we do not need to * grab it again here to protect the path integrity */ @@ -1519,17 +1508,16 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry, info_buf_target = info_buf_source + 1; tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName, info_buf_source, - cifs_sb_source->local_nls, - cifs_sb_source->mnt_cifs_flags & + cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (tmprc != 0) goto unlink_target; - tmprc = CIFSSMBUnixQPathInfo(xid, tcon, - toName, info_buf_target, - cifs_sb_target->local_nls, - /* remap based on source sb */ - cifs_sb_source->mnt_cifs_flags & + tmprc = CIFSSMBUnixQPathInfo(xid, tcon, toName, + info_buf_target, + cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (tmprc == 0 && (info_buf_source->UniqueId == diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index f97851119e6..9aad47a2d62 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -206,26 +206,30 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) } int -cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, - const unsigned short int port) +cifs_set_port(struct sockaddr *addr, const unsigned short int port) { - if (!cifs_convert_address(dst, src, len)) - return 0; - - switch (dst->sa_family) { + switch (addr->sa_family) { case AF_INET: - ((struct sockaddr_in *)dst)->sin_port = htons(port); + ((struct sockaddr_in *)addr)->sin_port = htons(port); break; case AF_INET6: - ((struct sockaddr_in6 *)dst)->sin6_port = htons(port); + ((struct sockaddr_in6 *)addr)->sin6_port = htons(port); break; default: return 0; } - return 1; } +int +cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, + const unsigned short int port) +{ + if (!cifs_convert_address(dst, src, len)) + return 0; + return cifs_set_port(dst, port); +} + /***************************************************************************** convert a NT status code to a dos class/code *****************************************************************************/ diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h index 1db0f0746a5..49c9a4e7531 100644 --- a/fs/cifs/ntlmssp.h +++ b/fs/cifs/ntlmssp.h @@ -61,19 +61,6 @@ #define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000 #define 
NTLMSSP_NEGOTIATE_56 0x80000000 -/* Define AV Pair Field IDs */ -#define NTLMSSP_AV_EOL 0 -#define NTLMSSP_AV_NB_COMPUTER_NAME 1 -#define NTLMSSP_AV_NB_DOMAIN_NAME 2 -#define NTLMSSP_AV_DNS_COMPUTER_NAME 3 -#define NTLMSSP_AV_DNS_DOMAIN_NAME 4 -#define NTLMSSP_AV_DNS_TREE_NAME 5 -#define NTLMSSP_AV_FLAGS 6 -#define NTLMSSP_AV_TIMESTAMP 7 -#define NTLMSSP_AV_RESTRICTION 8 -#define NTLMSSP_AV_TARGET_NAME 9 -#define NTLMSSP_AV_CHANNEL_BINDINGS 10 - /* Although typedefs are not commonly used for structure definitions */ /* in the Linux kernel, in this particular case they are useful */ /* to more closely match the standards document for NTLMSSP from */ diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 795095f4eac..0a57cb7db5d 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -383,9 +383,6 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft, static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifsSesInfo *ses) { - unsigned int tioffset; /* challeng message target info area */ - unsigned int tilen; /* challeng message target info area length */ - CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; if (blob_len < sizeof(CHALLENGE_MESSAGE)) { @@ -408,20 +405,6 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, /* BB spec says that if AvId field of MsvAvTimestamp is populated then we must set the MIC field of the AUTHENTICATE_MESSAGE */ - ses->server->ntlmssp.server_flags = le32_to_cpu(pblob->NegotiateFlags); - - tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset); - tilen = cpu_to_le16(pblob->TargetInfoArray.Length); - ses->server->tilen = tilen; - if (tilen) { - ses->server->tiblob = kmalloc(tilen, GFP_KERNEL); - if (!ses->server->tiblob) { - cERROR(1, "Challenge target info allocation failure"); - return -ENOMEM; - } - memcpy(ses->server->tiblob, bcc_ptr + tioffset, tilen); - } - return 0; } @@ -442,13 +425,12 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, /* BB is NTLMV2 session security format easier to use here? 
*/ flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | - NTLMSSP_NEGOTIATE_NTLM; + NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM; if (ses->server->secMode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { - flags |= NTLMSSP_NEGOTIATE_SIGN | - NTLMSSP_NEGOTIATE_KEY_XCH | - NTLMSSP_NEGOTIATE_EXTENDED_SEC; - } + (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) + flags |= NTLMSSP_NEGOTIATE_SIGN; + if (ses->server->secMode & SECMODE_SIGN_REQUIRED) + flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; sec_blob->NegotiateFlags |= cpu_to_le32(flags); @@ -469,12 +451,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, struct cifsSesInfo *ses, const struct nls_table *nls_cp, bool first) { - int rc; - unsigned int size; AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; __u32 flags; unsigned char *tmp; - struct ntlmv2_resp ntlmv2_response = {}; + char ntlm_session_key[CIFS_SESS_KEY_SIZE]; memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); sec_blob->MessageType = NtLmAuthenticate; @@ -497,25 +477,19 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, sec_blob->LmChallengeResponse.Length = 0; sec_blob->LmChallengeResponse.MaximumLength = 0; - sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer); - rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp); - if (rc) { - cERROR(1, "error rc: %d during ntlmssp ntlmv2 setup", rc); - goto setup_ntlmv2_ret; - } - size = sizeof(struct ntlmv2_resp); - memcpy(tmp, (char *)&ntlmv2_response, size); - tmp += size; - if (ses->server->tilen > 0) { - memcpy(tmp, ses->server->tiblob, ses->server->tilen); - tmp += ses->server->tilen; - } else - ses->server->tilen = 0; + /* calculate session key, BB what about adding similar ntlmv2 path? 
*/ + SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key); + if (first) + cifs_calculate_mac_key(&ses->server->mac_signing_key, + ntlm_session_key, ses->password); - sec_blob->NtChallengeResponse.Length = cpu_to_le16(size + - ses->server->tilen); + memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE); + sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer); + sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE); sec_blob->NtChallengeResponse.MaximumLength = - cpu_to_le16(size + ses->server->tilen); + cpu_to_le16(CIFS_SESS_KEY_SIZE); + + tmp += CIFS_SESS_KEY_SIZE; if (ses->domainName == NULL) { sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); @@ -527,6 +501,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, len = cifs_strtoUCS((__le16 *)tmp, ses->domainName, MAX_USERNAME_SIZE, nls_cp); len *= 2; /* unicode is 2 bytes each */ + len += 2; /* trailing null */ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); sec_blob->DomainName.Length = cpu_to_le16(len); sec_blob->DomainName.MaximumLength = cpu_to_le16(len); @@ -543,6 +518,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, len = cifs_strtoUCS((__le16 *)tmp, ses->userName, MAX_USERNAME_SIZE, nls_cp); len *= 2; /* unicode is 2 bytes each */ + len += 2; /* trailing null */ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); sec_blob->UserName.Length = cpu_to_le16(len); sec_blob->UserName.MaximumLength = cpu_to_le16(len); @@ -554,26 +530,9 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, sec_blob->WorkstationName.MaximumLength = 0; tmp += 2; - if ((ses->server->ntlmssp.server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) && - !calc_seckey(ses->server)) { - memcpy(tmp, ses->server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE); - sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); - sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE); - sec_blob->SessionKey.MaximumLength = - cpu_to_le16(CIFS_CPHTXT_SIZE); - tmp += CIFS_CPHTXT_SIZE; - } else { - sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); - sec_blob->SessionKey.Length = 0; - sec_blob->SessionKey.MaximumLength = 0; - } - - ses->server->sequence_number = 0; - -setup_ntlmv2_ret: - if (ses->server->tilen > 0) - kfree(ses->server->tiblob); - + sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); + sec_blob->SessionKey.Length = 0; + sec_blob->SessionKey.MaximumLength = 0; return tmp - pbuffer; } @@ -587,14 +546,15 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB, return; } -static int setup_ntlmssp_auth_req(char *ntlmsspblob, +static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB, struct cifsSesInfo *ses, const struct nls_table *nls, bool first_time) { int bloblen; - bloblen = build_ntlmssp_auth_blob(ntlmsspblob, ses, nls, + bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls, first_time); + pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen); return bloblen; } @@ -730,7 +690,7 @@ ssetup_ntlmssp_authenticate: if (first_time) /* should this be moved into common code with similar ntlmv2 path? 
*/ - cifs_calculate_session_key(&ses->server->session_key, + cifs_calculate_mac_key(&ses->server->mac_signing_key, ntlm_session_key, ses->password); /* copy session key */ @@ -769,21 +729,12 @@ ssetup_ntlmssp_authenticate: cpu_to_le16(sizeof(struct ntlmv2_resp)); /* calculate session key */ - rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp); - if (rc) { - kfree(v2_sess_key); - goto ssetup_exit; - } + setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp); /* FIXME: calculate MAC key */ memcpy(bcc_ptr, (char *)v2_sess_key, sizeof(struct ntlmv2_resp)); bcc_ptr += sizeof(struct ntlmv2_resp); kfree(v2_sess_key); - if (ses->server->tilen > 0) { - memcpy(bcc_ptr, ses->server->tiblob, - ses->server->tilen); - bcc_ptr += ses->server->tilen; - } if (ses->capabilities & CAP_UNICODE) { if (iov[0].iov_len % 2) { *bcc_ptr = 0; @@ -814,15 +765,15 @@ ssetup_ntlmssp_authenticate: } /* bail out if key is too long */ if (msg->sesskey_len > - sizeof(ses->server->session_key.data.krb5)) { + sizeof(ses->server->mac_signing_key.data.krb5)) { cERROR(1, "Kerberos signing key too long (%u bytes)", msg->sesskey_len); rc = -EOVERFLOW; goto ssetup_exit; } if (first_time) { - ses->server->session_key.len = msg->sesskey_len; - memcpy(ses->server->session_key.data.krb5, + ses->server->mac_signing_key.len = msg->sesskey_len; + memcpy(ses->server->mac_signing_key.data.krb5, msg->data, msg->sesskey_len); } pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; @@ -864,28 +815,12 @@ ssetup_ntlmssp_authenticate: if (phase == NtLmNegotiate) { setup_ntlmssp_neg_req(pSMB, ses); iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); - iov[1].iov_base = &pSMB->req.SecurityBlob[0]; } else if (phase == NtLmAuthenticate) { int blob_len; - char *ntlmsspblob; - - ntlmsspblob = kmalloc(5 * - sizeof(struct _AUTHENTICATE_MESSAGE), - GFP_KERNEL); - if (!ntlmsspblob) { - cERROR(1, "Can't allocate NTLMSSP"); - rc = -ENOMEM; - goto ssetup_exit; - } - - blob_len = setup_ntlmssp_auth_req(ntlmsspblob, - ses, - nls_cp, - first_time); + blob_len = setup_ntlmssp_auth_req(pSMB, ses, + nls_cp, + first_time); iov[1].iov_len = blob_len; - iov[1].iov_base = ntlmsspblob; - pSMB->req.SecurityBlobLength = - cpu_to_le16(blob_len); /* Make sure that we tell the server that we are using the uid that it just gave us back on the response (challenge) */ @@ -895,6 +830,7 @@ ssetup_ntlmssp_authenticate: rc = -ENOSYS; goto ssetup_exit; } + iov[1].iov_base = &pSMB->req.SecurityBlob[0]; /* unicode strings must be word aligned */ if ((iov[0].iov_len + iov[1].iov_len) % 2) { *bcc_ptr = 0; diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index e0588cdf4cc..82f78c4d697 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -543,7 +543,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))) { rc = cifs_verify_signature(midQ->resp_buf, - ses->server, + &ses->server->mac_signing_key, midQ->sequence_number+1); if (rc) { cERROR(1, "Unexpected SMB signature"); @@ -731,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))) { rc = cifs_verify_signature(out_buf, - ses->server, + &ses->server->mac_signing_key, midQ->sequence_number+1); if (rc) { cERROR(1, "Unexpected SMB signature"); @@ -981,7 +981,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))) { rc = cifs_verify_signature(out_buf, - ses->server, + &ses->server->mac_signing_key, 
midQ->sequence_number+1); if (rc) { cERROR(1, "Unexpected SMB signature"); diff --git a/fs/exec.c b/fs/exec.c index 2d945528274..828dd2461d6 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -376,6 +376,9 @@ static int count(const char __user * const __user * argv, int max) argv++; if (i++ >= max) return -E2BIG; + + if (fatal_signal_pending(current)) + return -ERESTARTNOHAND; cond_resched(); } } @@ -419,6 +422,12 @@ static int copy_strings(int argc, const char __user *const __user *argv, while (len > 0) { int offset, bytes_to_copy; + if (fatal_signal_pending(current)) { + ret = -ERESTARTNOHAND; + goto out; + } + cond_resched(); + offset = pos % PAGE_SIZE; if (offset == 0) offset = PAGE_SIZE; @@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm *bprm, #else stack_top = arch_align_stack(stack_top); stack_top = PAGE_ALIGN(stack_top); + + if (unlikely(stack_top < mmap_min_addr) || + unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) + return -ENOMEM; + stack_shift = vma->vm_end - stack_top; bprm->p -= stack_shift; diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 7d9d06ba184..81e086d8aa5 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -808,7 +808,7 @@ int bdi_writeback_thread(void *data) wb->last_active = jiffies; set_current_state(TASK_INTERRUPTIBLE); - if (!list_empty(&bdi->work_list)) { + if (!list_empty(&bdi->work_list) || kthread_should_stop()) { __set_current_state(TASK_RUNNING); continue; } diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 6c2aad49d73..f7e13db613c 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -63,6 +63,7 @@ config NFS_V3_ACL config NFS_V4 bool "NFS client support for NFS version 4" depends on NFS_FS + select SUNRPC_GSS help This option enables support for version 4 of the NFS protocol (RFC 3530) in the kernel's NFS client. 
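The fs/fs-writeback.c hunk above makes bdi_writeback_thread() test kthread_should_stop() together with the work-list check after it has already set TASK_INTERRUPTIBLE, so a kthread_stop() issued in that window can no longer be slept through. The sketch below shows that general pattern in isolation; it is an illustration under assumed names (my_thread, my_work_list), not the kernel's actual writeback loop, and only the standard kthread/scheduler/list helpers used here are real APIs.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/list.h>

static LIST_HEAD(my_work_list);	/* hypothetical queue of pending work */

static int my_thread(void *unused)
{
	while (!kthread_should_stop()) {
		/* ... drain my_work_list here ... */

		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * Re-check after setting the task state: if new work was
		 * queued or kthread_stop() was called meanwhile, skip the
		 * sleep instead of missing the wakeup.
		 */
		if (!list_empty(&my_work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}
		schedule();
	}
	return 0;
}

A thread structured like this can be started with kthread_run() and later torn down with kthread_stop() without the stop request racing against the check-then-sleep sequence.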
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 4e7df2adb21..e7340729af8 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -275,7 +275,7 @@ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, sin1->sin6_scope_id != sin2->sin6_scope_id) return 0; - return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr); + return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr); } #else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, diff --git a/fs/nfs/file.c b/fs/nfs/file.c index eb51bd6201d..05bf3c0dc75 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -723,10 +723,6 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl) default: BUG(); } - if (res < 0) - dprintk(KERN_WARNING "%s: VFS is out of sync with lock manager" - " - error %d!\n", - __func__, res); return res; } diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ec3966e4706..f4cbf0c306c 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -431,7 +431,15 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf) goto out_err; error = server->nfs_client->rpc_ops->statfs(server, fh, &res); + if (unlikely(error == -ESTALE)) { + struct dentry *pd_dentry; + pd_dentry = dget_parent(dentry); + if (pd_dentry != NULL) { + nfs_zap_caches(pd_dentry->d_inode); + dput(pd_dentry); + } + } nfs_free_fattr(res.fattr); if (error < 0) goto out_err; diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index 95932f523ae..4264377552e 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig @@ -69,6 +69,7 @@ config NFSD_V4 depends on NFSD && PROC_FS && EXPERIMENTAL select NFSD_V3 select FS_POSIX_ACL + select SUNRPC_GSS help This option enables support in your system's NFS server for version 4 of the NFS protocol (RFC 3530). diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index d72cf2bb054..286e36e21da 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1932,7 +1932,8 @@ xfs_buf_init(void) if (!xfs_buf_zone) goto out; - xfslogd_workqueue = create_workqueue("xfslogd"); + xfslogd_workqueue = alloc_workqueue("xfslogd", + WQ_RESCUER | WQ_HIGHPRI, 1); if (!xfslogd_workqueue) goto out_free_buf_zone; diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 4fec427b83e..3b9e626f7cd 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -785,6 +785,8 @@ xfs_ioc_fsgetxattr( { struct fsxattr fa; + memset(&fa, 0, sizeof(struct fsxattr)); + xfs_ilock(ip, XFS_ILOCK_SHARED); fa.fsx_xflags = xfs_ip2xflags(ip); fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index c9f3cc5949a..3e5a51af757 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -386,7 +386,15 @@ struct drm_connector_funcs { void (*dpms)(struct drm_connector *connector, int mode); void (*save)(struct drm_connector *connector); void (*restore)(struct drm_connector *connector); - enum drm_connector_status (*detect)(struct drm_connector *connector); + + /* Check to see if anything is attached to the connector. + * @force is set to false whilst polling, true when checking the + * connector due to user request. @force can be used by the driver + * to avoid expensive, destructive operations during automated + * probing. 
+ */ + enum drm_connector_status (*detect)(struct drm_connector *connector, + bool force); int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); int (*set_property)(struct drm_connector *connector, struct drm_property *property, uint64_t val); diff --git a/include/linux/compat.h b/include/linux/compat.h index 9ddc8780e8d..5778b559d59 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvector(int type, const struct compat_iovec __user *uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_pointer, struct iovec **ret_pointer); + +extern void __user *compat_alloc_user_space(unsigned long len); + #endif /* CONFIG_COMPAT */ #endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 2c958f4fce1..926b50322a4 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); extern int elevator_init(struct request_queue *, char *); extern void elevator_exit(struct elevator_queue *); +extern int elevator_change(struct request_queue *, const char *); extern int elv_rq_merge_ok(struct request *, struct bio *); /* diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 03f616b78cf..e41f7dd1ae6 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -13,6 +13,7 @@ #include <linux/errno.h> struct device; +struct gpio_chip; /* * Some platforms don't support the GPIO programming interface. diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index d50ba858cfe..d1a9193960f 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -274,8 +274,14 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) int ret; ret = dquot_alloc_space_nodirty(inode, nr); - if (!ret) - mark_inode_dirty_sync(inode); + if (!ret) { + /* + * Mark inode fully dirty. Since we are allocating blocks, inode + * would become fully dirty soon anyway and it reportedly + * reduces inode_lock contention. + */ + mark_inode_dirty(inode); + } return ret; } diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h index cc813f95a2f..c91302f3a25 100644 --- a/include/linux/spi/dw_spi.h +++ b/include/linux/spi/dw_spi.h @@ -14,7 +14,9 @@ #define SPI_MODE_OFFSET 6 #define SPI_SCPH_OFFSET 6 #define SPI_SCOL_OFFSET 7 + #define SPI_TMOD_OFFSET 8 +#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET) #define SPI_TMOD_TR 0x0 /* xmit & recv */ #define SPI_TMOD_TO 0x1 /* xmit only */ #define SPI_TMOD_RO 0x2 /* recv only */ diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 569dc722a60..85f38a63f09 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -30,7 +30,7 @@ struct rpc_inode; * The high-level client handle */ struct rpc_clnt { - struct kref cl_kref; /* Number of references */ + atomic_t cl_count; /* Number of references */ struct list_head cl_clients; /* Global list of clients */ struct list_head cl_tasks; /* List of tasks */ spinlock_t cl_lock; /* spinlock */ diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index f11100f9648..25e02c941ba 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -235,6 +235,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } #define work_clear_pending(work) \ clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) +/* + * Workqueue flags and constants. 
For details, please refer to + * Documentation/workqueue.txt. + */ enum { WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index 726cc353640..ef6c24a529e 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h @@ -27,11 +27,17 @@ struct cgroup_cls_state #ifdef CONFIG_NET_CLS_CGROUP static inline u32 task_cls_classid(struct task_struct *p) { + int classid; + if (in_interrupt()) return 0; - return container_of(task_subsys_state(p, net_cls_subsys_id), - struct cgroup_cls_state, css)->classid; + rcu_read_lock(); + classid = container_of(task_subsys_state(p, net_cls_subsys_id), + struct cgroup_cls_state, css)->classid; + rcu_read_unlock(); + + return classid; } #else extern int net_cls_subsys_id; diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index a4747a0f730..f976885f686 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -955,6 +955,9 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum) return csum_partial(diff, sizeof(diff), oldsum); } +extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, + int outin); + #endif /* __KERNEL__ */ #endif /* _NET_IP_VS_H */ diff --git a/include/net/sock.h b/include/net/sock.h index ac53bfbdfe1..adab9dc5818 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -752,6 +752,7 @@ struct proto { /* Keeping track of sk's, looking them up, and port selection methods. */ void (*hash)(struct sock *sk); void (*unhash)(struct sock *sk); + void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); /* Keeping track of sockets in use */ diff --git a/include/net/udp.h b/include/net/udp.h index 7abdf305da5..a184d3496b1 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -151,6 +151,7 @@ static inline void udp_lib_hash(struct sock *sk) } extern void udp_lib_unhash(struct sock *sk); +extern void udp_lib_rehash(struct sock *sk, u16 new_hash); static inline void udp_lib_close(struct sock *sk, long timeout) { diff --git a/kernel/compat.c b/kernel/compat.c index e167efce842..c9e2ec0b34a 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info) return 0; } + +/* + * Allocate user-space memory for the duration of a single system call, + * in order to marshall parameters inside a compat thunk. + */ +void __user *compat_alloc_user_space(unsigned long len) +{ + void __user *ptr; + + /* If len would occupy more than half of the entire compat space... 
*/ + if (unlikely(len > (((compat_uptr_t)~0) >> 1))) + return NULL; + + ptr = arch_compat_alloc_user_space(len); + + if (unlikely(!access_ok(VERIFY_WRITE, ptr, len))) + return NULL; + + return ptr; +} +EXPORT_SYMBOL_GPL(compat_alloc_user_space); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 657555a5f30..db5b5606468 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5761,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (long)hcpu; - switch (action) { + switch (action & ~CPU_TASKS_FROZEN) { case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: + case CPU_DOWN_FAILED: perf_event_init_cpu(cpu); break; + case CPU_UP_CANCELED: case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: perf_event_exit_cpu(cpu); break; diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index b7e4c362361..645e541a45f 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c @@ -389,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, } else if (count == 11) { /* len('0x12345678/0') */ if (copy_from_user(ascii_value, buf, 11)) return -EFAULT; + if (strlen(ascii_value) != 10) + return -EINVAL; x = sscanf(ascii_value, "%x", &value); if (x != 1) return -EINVAL; - pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value); + pr_debug("%s, %d, 0x%x\n", ascii_value, x, value); } else return -EINVAL; diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index f6cd6faf84f..d3f795f01bb 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1121,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask) return nr_alloc; } -static unsigned long preallocate_image_memory(unsigned long nr_pages) +static unsigned long preallocate_image_memory(unsigned long nr_pages, + unsigned long avail_normal) { - return preallocate_image_pages(nr_pages, GFP_IMAGE); + unsigned long alloc; + + if (avail_normal <= alloc_normal) + return 0; + + alloc = avail_normal - alloc_normal; + if (nr_pages < alloc) + alloc = nr_pages; + + return preallocate_image_pages(alloc, GFP_IMAGE); } #ifdef CONFIG_HIGHMEM @@ -1169,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages, */ static void free_unnecessary_pages(void) { - unsigned long save_highmem, to_free_normal, to_free_highmem; + unsigned long save, to_free_normal, to_free_highmem; - to_free_normal = alloc_normal - count_data_pages(); - save_highmem = count_highmem_pages(); - if (alloc_highmem > save_highmem) { - to_free_highmem = alloc_highmem - save_highmem; + save = count_data_pages(); + if (alloc_normal >= save) { + to_free_normal = alloc_normal - save; + save = 0; + } else { + to_free_normal = 0; + save -= alloc_normal; + } + save += count_highmem_pages(); + if (alloc_highmem >= save) { + to_free_highmem = alloc_highmem - save; } else { to_free_highmem = 0; - to_free_normal -= save_highmem - alloc_highmem; + to_free_normal -= save - alloc_highmem; } memory_bm_position_reset(©_bm); @@ -1258,7 +1275,7 @@ int hibernate_preallocate_memory(void) { struct zone *zone; unsigned long saveable, size, max_size, count, highmem, pages = 0; - unsigned long alloc, save_highmem, pages_highmem; + unsigned long alloc, save_highmem, pages_highmem, avail_normal; struct timeval start, stop; int error; @@ -1295,6 +1312,7 @@ int hibernate_preallocate_memory(void) else count += zone_page_state(zone, NR_FREE_PAGES); } + avail_normal = count; count += highmem; count -= totalreserve_pages; 
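The kernel/power/snapshot.c hunks in this area thread an avail_normal count through hibernate_preallocate_memory() so that image preallocation can no longer ask for more normal-zone pages than are actually left. Taken on its own, the clamp that the new preallocate_image_memory(nr_pages, avail_normal) applies before calling preallocate_image_pages() amounts to the arithmetic below; this is a rough standalone restatement for illustration, not the kernel's code, with alloc_normal standing for the pages already taken from the normal zone.

/*
 * Example: avail_normal = 1000, alloc_normal = 800, nr_pages = 300
 * -> at most 200 further pages may come from the normal zone.
 */
static unsigned long clamp_image_prealloc(unsigned long nr_pages,
					  unsigned long avail_normal,
					  unsigned long alloc_normal)
{
	unsigned long room;

	if (avail_normal <= alloc_normal)
		return 0;		/* normal zone already exhausted */

	room = avail_normal - alloc_normal;
	return nr_pages < room ? nr_pages : room;
}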
@@ -1309,12 +1327,21 @@ int hibernate_preallocate_memory(void) */ if (size >= saveable) { pages = preallocate_image_highmem(save_highmem); - pages += preallocate_image_memory(saveable - pages); + pages += preallocate_image_memory(saveable - pages, avail_normal); goto out; } /* Estimate the minimum size of the image. */ pages = minimum_image_size(saveable); + /* + * To avoid excessive pressure on the normal zone, leave room in it to + * accommodate an image of the minimum size (unless it's already too + * small, in which case don't preallocate pages from it at all). + */ + if (avail_normal > pages) + avail_normal -= pages; + else + avail_normal = 0; if (size < pages) size = min_t(unsigned long, pages, max_size); @@ -1335,16 +1362,34 @@ int hibernate_preallocate_memory(void) */ pages_highmem = preallocate_image_highmem(highmem / 2); alloc = (count - max_size) - pages_highmem; - pages = preallocate_image_memory(alloc); - if (pages < alloc) - goto err_out; - size = max_size - size; - alloc = size; - size = preallocate_highmem_fraction(size, highmem, count); - pages_highmem += size; - alloc -= size; - pages += preallocate_image_memory(alloc); - pages += pages_highmem; + pages = preallocate_image_memory(alloc, avail_normal); + if (pages < alloc) { + /* We have exhausted non-highmem pages, try highmem. */ + alloc -= pages; + pages += pages_highmem; + pages_highmem = preallocate_image_highmem(alloc); + if (pages_highmem < alloc) + goto err_out; + pages += pages_highmem; + /* + * size is the desired number of saveable pages to leave in + * memory, so try to preallocate (all memory - size) pages. + */ + alloc = (count - pages) - size; + pages += preallocate_image_highmem(alloc); + } else { + /* + * There are approximately max_size saveable pages at this point + * and we want to reduce this number down to size. 
+ */ + alloc = max_size - size; + size = preallocate_highmem_fraction(alloc, highmem, count); + pages_highmem += size; + alloc -= size; + size = preallocate_image_memory(alloc, avail_normal); + pages_highmem += preallocate_image_highmem(alloc - size); + pages += pages_highmem + size; + } /* * We only need as many page frames for the image as there are saveable diff --git a/kernel/sched.c b/kernel/sched.c index 09b574e7f4d..ed09d4f2a69 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p) static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } + +static void sched_avg_update(struct rq *rq) +{ +} #endif /* CONFIG_SMP */ #if BITS_PER_LONG == 32 @@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq) this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; } + + sched_avg_update(this_rq); } static void update_cpu_load_active(struct rq *this_rq) diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 134f7edb30c..a171138a940 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling * Minimal preemption granularity for CPU-bound tasks: * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds) */ -unsigned int sysctl_sched_min_granularity = 2000000ULL; -unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL; +unsigned int sysctl_sched_min_granularity = 750000ULL; +unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; /* * is kept at sysctl_sched_latency / sysctl_sched_min_granularity */ -static unsigned int sched_nr_latency = 3; +static unsigned int sched_nr_latency = 8; /* * After fork, child runs first. If set to 0 (default) then @@ -2267,8 +2267,6 @@ unsigned long scale_rt_power(int cpu) struct rq *rq = cpu_rq(cpu); u64 total, available; - sched_avg_update(rq); - total = sched_avg_period() + (rq->clock - rq->age_stamp); available = total - rq->rt_avg; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 7cb1f45a1de..fa7ece649fe 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1510,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos) if (*pos > 0) return t_hash_start(m, pos); iter->flags |= FTRACE_ITER_PRINTALL; + /* reset in case of seek/pread */ + iter->flags &= ~FTRACE_ITER_HASH; return iter; } @@ -2416,7 +2418,7 @@ static const struct file_operations ftrace_filter_fops = { .open = ftrace_filter_open, .read = seq_read, .write = ftrace_filter_write, - .llseek = ftrace_regex_lseek, + .llseek = no_llseek, .release = ftrace_filter_release, }; diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 000e6e85b44..31cc4cb0dbf 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event) tp_event->class && tp_event->class->reg && try_module_get(tp_event->mod)) { ret = perf_trace_event_init(tp_event, p_event); + if (ret) + module_put(tp_event->mod); break; } } @@ -146,6 +148,7 @@ void perf_trace_destroy(struct perf_event *p_event) } } out: + module_put(tp_event->mod); mutex_unlock(&event_mutex); } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 8b27c9849b4..544301d29de 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); static int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs); 
-/* Check the name is good for event/group */ -static int check_event_name(const char *name) +/* Check the name is good for event/group/fields */ +static int is_good_name(const char *name) { if (!isalpha(*name) && *name != '_') return 0; @@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, else tp->rp.kp.pre_handler = kprobe_dispatcher; - if (!event || !check_event_name(event)) { + if (!event || !is_good_name(event)) { ret = -EINVAL; goto error; } @@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, if (!tp->call.name) goto error; - if (!group || !check_event_name(group)) { + if (!group || !is_good_name(group)) { ret = -EINVAL; goto error; } @@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv) int i, ret = 0; int is_return = 0, is_delete = 0; char *symbol = NULL, *event = NULL, *group = NULL; - char *arg, *tmp; + char *arg; unsigned long offset = 0; void *addr = NULL; char buf[MAX_EVENT_NAME_LEN]; @@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv) /* parse arguments */ ret = 0; for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { + /* Increment count for freeing args in error case */ + tp->nr_args++; + /* Parse argument name */ arg = strchr(argv[i], '='); - if (arg) + if (arg) { *arg++ = '\0'; - else + tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); + } else { arg = argv[i]; + /* If argument name is omitted, set "argN" */ + snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); + tp->args[i].name = kstrdup(buf, GFP_KERNEL); + } - tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); if (!tp->args[i].name) { - pr_info("Failed to allocate argument%d name '%s'.\n", - i, argv[i]); + pr_info("Failed to allocate argument[%d] name.\n", i); ret = -ENOMEM; goto error; } - tmp = strchr(tp->args[i].name, ':'); - if (tmp) - *tmp = '_'; /* convert : to _ */ + + if (!is_good_name(tp->args[i].name)) { + pr_info("Invalid argument[%d] name: %s\n", + i, tp->args[i].name); + ret = -EINVAL; + goto error; + } if (conflict_field_name(tp->args[i].name, tp->args, i)) { - pr_info("Argument%d name '%s' conflicts with " + pr_info("Argument[%d] name '%s' conflicts with " "another field.\n", i, argv[i]); ret = -EINVAL; goto error; @@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv) /* Parse fetch argument */ ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); if (ret) { - pr_info("Parse error at argument%d. (%d)\n", i, ret); - kfree(tp->args[i].name); + pr_info("Parse error at argument[%d]. (%d)\n", i, ret); goto error; } - - tp->nr_args++; } ret = register_trace_probe(tp); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 727f24e563a..f77afd93922 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1,19 +1,26 @@ /* - * linux/kernel/workqueue.c + * kernel/workqueue.c - generic async execution with shared worker pool * - * Generic mechanism for defining kernel helper threads for running - * arbitrary tasks in process context. + * Copyright (C) 2002 Ingo Molnar * - * Started by Ingo Molnar, Copyright (C) 2002 + * Derived from the taskqueue/keventd code by: + * David Woodhouse <dwmw2@infradead.org> + * Andrew Morton + * Kai Petzke <wpp@marie.physik.tu-berlin.de> + * Theodore Ts'o <tytso@mit.edu> * - * Derived from the taskqueue/keventd code by: + * Made to use alloc_percpu by Christoph Lameter. 
* - * David Woodhouse <dwmw2@infradead.org> - * Andrew Morton - * Kai Petzke <wpp@marie.physik.tu-berlin.de> - * Theodore Ts'o <tytso@mit.edu> + * Copyright (C) 2010 SUSE Linux Products GmbH + * Copyright (C) 2010 Tejun Heo <tj@kernel.org> * - * Made to use alloc_percpu by Christoph Lameter. + * This is the generic async execution mechanism. Work items as are + * executed in process context. The worker pool is shared and + * automatically managed. There is one worker pool for each CPU and + * one extra for works which are better served by workers which are + * not bound to any specific CPU. + * + * Please read Documentation/workqueue.txt for details. */ #include <linux/module.h> diff --git a/lib/scatterlist.c b/lib/scatterlist.c index a5ec42868f9..4ceb05d772a 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -248,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, left -= sg_size; sg = alloc_fn(alloc_size, gfp_mask); - if (unlikely(!sg)) - return -ENOMEM; + if (unlikely(!sg)) { + /* + * Adjust entry count to reflect that the last + * entry of the previous table won't be used for + * linkage. Without this, sg_kfree() may get + * confused. + */ + if (prv) + table->nents = ++table->orig_nents; + + return -ENOMEM; + } sg_init_table(sg, alloc_size); table->nents = table->orig_nents += sg_size; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index eaa4a5bbe06..c2bf86f470e 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -445,8 +445,8 @@ static int bdi_forker_thread(void *ptr) switch (action) { case FORK_THREAD: __set_current_state(TASK_RUNNING); - task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s", - dev_name(bdi->dev)); + task = kthread_create(bdi_writeback_thread, &bdi->wb, + "flush-%s", dev_name(bdi->dev)); if (IS_ERR(task)) { /* * If thread creation fails, force writeout of @@ -457,10 +457,13 @@ static int bdi_forker_thread(void *ptr) /* * The spinlock makes sure we do not lose * wake-ups when racing with 'bdi_queue_work()'. + * And as soon as the bdi thread is visible, we + * can start it. 
*/ spin_lock_bh(&bdi->wb_lock); bdi->wb.task = task; spin_unlock_bh(&bdi->wb_lock); + wake_up_process(task); } break; diff --git a/net/9p/client.c b/net/9p/client.c index dc6f2f26d02..9eb72505308 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -331,8 +331,10 @@ static void p9_tag_cleanup(struct p9_client *c) } } - if (c->tagpool) + if (c->tagpool) { + p9_idpool_put(0, c->tagpool); /* free reserved tag 0 */ p9_idpool_destroy(c->tagpool); + } /* free requests associated with tags */ for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) { @@ -944,6 +946,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, int16_t nwqids, count; err = 0; + wqids = NULL; clnt = oldfid->clnt; if (clone) { fid = p9_fid_create(clnt); @@ -994,9 +997,11 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, else fid->qid = oldfid->qid; + kfree(wqids); return fid; clunk_fid: + kfree(wqids); p9_client_clunk(fid); fid = NULL; diff --git a/net/core/dev.c b/net/core/dev.c index 3721fbb9a83..b9b22a3c4c8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2058,16 +2058,16 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb) { int queue_index; - struct sock *sk = skb->sk; + const struct net_device_ops *ops = dev->netdev_ops; - queue_index = sk_tx_queue_get(sk); - if (queue_index < 0) { - const struct net_device_ops *ops = dev->netdev_ops; + if (ops->ndo_select_queue) { + queue_index = ops->ndo_select_queue(dev, skb); + queue_index = dev_cap_txqueue(dev, queue_index); + } else { + struct sock *sk = skb->sk; + queue_index = sk_tx_queue_get(sk); + if (queue_index < 0) { - if (ops->ndo_select_queue) { - queue_index = ops->ndo_select_queue(dev, skb); - queue_index = dev_cap_txqueue(dev, queue_index); - } else { queue_index = 0; if (dev->real_num_tx_queues > 1) queue_index = skb_tx_hash(dev, skb); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 26396ff67cf..c83b421341c 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2706,7 +2706,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) } else if (skb_gro_len(p) != pinfo->gso_size) return -E2BIG; - headroom = NET_SKB_PAD + NET_IP_ALIGN; + headroom = skb_headroom(p); nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); if (unlikely(!nskb)) return -ENOMEM; diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index f0550941df7..721a8a37b45 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -62,8 +62,11 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) } if (!inet->inet_saddr) inet->inet_saddr = rt->rt_src; /* Update source address */ - if (!inet->inet_rcv_saddr) + if (!inet->inet_rcv_saddr) { inet->inet_rcv_saddr = rt->rt_src; + if (sk->sk_prot->rehash) + sk->sk_prot->rehash(sk); + } inet->inet_daddr = rt->rt_dst; inet->inet_dport = usin->sin_port; sk->sk_state = TCP_ESTABLISHED; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index a4396891835..7d02a9f999f 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -246,6 +246,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, struct fib_result res; int no_addr, rpf, accept_local; + bool dev_match; int ret; struct net *net; @@ -273,12 +274,22 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, } *spec_dst = FIB_RES_PREFSRC(res); fib_combine_itag(itag, &res); + dev_match = false; + #ifdef CONFIG_IP_ROUTE_MULTIPATH - if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1) + for (ret = 0; ret < 
res.fi->fib_nhs; ret++) { + struct fib_nh *nh = &res.fi->fib_nh[ret]; + + if (nh->nh_dev == dev) { + dev_match = true; + break; + } + } #else if (FIB_RES_DEV(res) == dev) + dev_match = true; #endif - { + if (dev_match) { ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; fib_res_put(&res); return ret; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 79d057a939b..4a8e370862b 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -186,7 +186,9 @@ static inline struct tnode *node_parent_rcu(struct node *node) { struct tnode *ret = node_parent(node); - return rcu_dereference(ret); + return rcu_dereference_check(ret, + rcu_read_lock_held() || + lockdep_rtnl_is_held()); } /* Same as rcu_assign_pointer @@ -1753,7 +1755,9 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c) static struct leaf *trie_firstleaf(struct trie *t) { - struct tnode *n = (struct tnode *) rcu_dereference(t->trie); + struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie, + rcu_read_lock_held() || + lockdep_rtnl_is_held()); if (!n) return NULL; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 3f56b6e6c6a..6298f75d5e9 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2738,6 +2738,11 @@ slow_output: } EXPORT_SYMBOL_GPL(__ip_route_output_key); +static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) +{ + return NULL; +} + static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) { } @@ -2746,7 +2751,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), .destroy = ipv4_dst_destroy, - .check = ipv4_dst_check, + .check = ipv4_blackhole_dst_check, .update_pmtu = ipv4_rt_blackhole_update_pmtu, .entries = ATOMIC_INIT(0), }; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 32e0bef60d0..fb23c2e63b5 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1260,6 +1260,49 @@ void udp_lib_unhash(struct sock *sk) } EXPORT_SYMBOL(udp_lib_unhash); +/* + * inet_rcv_saddr was changed, we must rehash secondary hash + */ +void udp_lib_rehash(struct sock *sk, u16 newhash) +{ + if (sk_hashed(sk)) { + struct udp_table *udptable = sk->sk_prot->h.udp_table; + struct udp_hslot *hslot, *hslot2, *nhslot2; + + hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); + nhslot2 = udp_hashslot2(udptable, newhash); + udp_sk(sk)->udp_portaddr_hash = newhash; + if (hslot2 != nhslot2) { + hslot = udp_hashslot(udptable, sock_net(sk), + udp_sk(sk)->udp_port_hash); + /* we must lock primary chain too */ + spin_lock_bh(&hslot->lock); + + spin_lock(&hslot2->lock); + hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); + hslot2->count--; + spin_unlock(&hslot2->lock); + + spin_lock(&nhslot2->lock); + hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, + &nhslot2->head); + nhslot2->count++; + spin_unlock(&nhslot2->lock); + + spin_unlock_bh(&hslot->lock); + } + } +} +EXPORT_SYMBOL(udp_lib_rehash); + +static void udp_v4_rehash(struct sock *sk) +{ + u16 new_hash = udp4_portaddr_hash(sock_net(sk), + inet_sk(sk)->inet_rcv_saddr, + inet_sk(sk)->inet_num); + udp_lib_rehash(sk, new_hash); +} + static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int rc; @@ -1843,6 +1886,7 @@ struct proto udp_prot = { .backlog_rcv = __udp_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, + .rehash = udp_v4_rehash, .get_port = udp_v4_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 
7d929a22cbc..ef371aa01ac 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -105,9 +105,12 @@ ipv4_connected: if (ipv6_addr_any(&np->saddr)) ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); - if (ipv6_addr_any(&np->rcv_saddr)) + if (ipv6_addr_any(&np->rcv_saddr)) { ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr); + if (sk->sk_prot->rehash) + sk->sk_prot->rehash(sk); + } goto out; } @@ -181,6 +184,8 @@ ipv4_connected: if (ipv6_addr_any(&np->rcv_saddr)) { ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); inet->inet_rcv_saddr = LOOPBACK4_IPV6; + if (sk->sk_prot->rehash) + sk->sk_prot->rehash(sk); } ip6_dst_store(sk, dst, diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 13ef5bc05cf..578f3c1a16d 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -113,14 +113,6 @@ static void nf_skb_free(struct sk_buff *skb) kfree_skb(NFCT_FRAG6_CB(skb)->orig); } -/* Memory Tracking Functions. */ -static void frag_kfree_skb(struct sk_buff *skb) -{ - atomic_sub(skb->truesize, &nf_init_frags.mem); - nf_skb_free(skb); - kfree_skb(skb); -} - /* Destruction primitives. */ static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) @@ -282,66 +274,22 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, } found: - /* We found where to put this one. Check for overlap with - * preceding fragment, and, if needed, align things so that - * any overlaps are eliminated. - */ - if (prev) { - int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset; - - if (i > 0) { - offset += i; - if (end <= offset) { - pr_debug("overlap\n"); - goto err; - } - if (!pskb_pull(skb, i)) { - pr_debug("Can't pull\n"); - goto err; - } - if (skb->ip_summed != CHECKSUM_UNNECESSARY) - skb->ip_summed = CHECKSUM_NONE; - } - } - - /* Look for overlap with succeeding segments. - * If we can merge fragments, do it. + /* RFC5722, Section 4: + * When reassembling an IPv6 datagram, if + * one or more its constituent fragments is determined to be an + * overlapping fragment, the entire datagram (and any constituent + * fragments, including those not yet received) MUST be silently + * discarded. */ - while (next && NFCT_FRAG6_CB(next)->offset < end) { - /* overlap is 'i' bytes */ - int i = end - NFCT_FRAG6_CB(next)->offset; - - if (i < next->len) { - /* Eat head of the next overlapped fragment - * and leave the loop. The next ones cannot overlap. - */ - pr_debug("Eat head of the overlapped parts.: %d", i); - if (!pskb_pull(next, i)) - goto err; - /* next fragment */ - NFCT_FRAG6_CB(next)->offset += i; - fq->q.meat -= i; - if (next->ip_summed != CHECKSUM_UNNECESSARY) - next->ip_summed = CHECKSUM_NONE; - break; - } else { - struct sk_buff *free_it = next; - - /* Old fragmnet is completely overridden with - * new one drop it. - */ - next = next->next; + /* Check for overlap with preceding fragment. */ + if (prev && + (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0) + goto discard_fq; - if (prev) - prev->next = next; - else - fq->q.fragments = next; - - fq->q.meat -= free_it->len; - frag_kfree_skb(free_it); - } - } + /* Look for overlap with succeeding segment. 
*/ + if (next && NFCT_FRAG6_CB(next)->offset < end) + goto discard_fq; NFCT_FRAG6_CB(skb)->offset = offset; @@ -371,6 +319,8 @@ found: write_unlock(&nf_frags.lock); return 0; +discard_fq: + fq_kill(fq); err: return -1; } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 545c4141b75..64cfef1b0a4 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -149,13 +149,6 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a) } EXPORT_SYMBOL(ip6_frag_match); -/* Memory Tracking Functions. */ -static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb) -{ - atomic_sub(skb->truesize, &nf->mem); - kfree_skb(skb); -} - void ip6_frag_init(struct inet_frag_queue *q, void *a) { struct frag_queue *fq = container_of(q, struct frag_queue, q); @@ -346,58 +339,22 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, } found: - /* We found where to put this one. Check for overlap with - * preceding fragment, and, if needed, align things so that - * any overlaps are eliminated. + /* RFC5722, Section 4: + * When reassembling an IPv6 datagram, if + * one or more its constituent fragments is determined to be an + * overlapping fragment, the entire datagram (and any constituent + * fragments, including those not yet received) MUST be silently + * discarded. */ - if (prev) { - int i = (FRAG6_CB(prev)->offset + prev->len) - offset; - if (i > 0) { - offset += i; - if (end <= offset) - goto err; - if (!pskb_pull(skb, i)) - goto err; - if (skb->ip_summed != CHECKSUM_UNNECESSARY) - skb->ip_summed = CHECKSUM_NONE; - } - } + /* Check for overlap with preceding fragment. */ + if (prev && + (FRAG6_CB(prev)->offset + prev->len) - offset > 0) + goto discard_fq; - /* Look for overlap with succeeding segments. - * If we can merge fragments, do it. - */ - while (next && FRAG6_CB(next)->offset < end) { - int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */ - - if (i < next->len) { - /* Eat head of the next overlapped fragment - * and leave the loop. The next ones cannot overlap. - */ - if (!pskb_pull(next, i)) - goto err; - FRAG6_CB(next)->offset += i; /* next fragment */ - fq->q.meat -= i; - if (next->ip_summed != CHECKSUM_UNNECESSARY) - next->ip_summed = CHECKSUM_NONE; - break; - } else { - struct sk_buff *free_it = next; - - /* Old fragment is completely overridden with - * new one drop it. - */ - next = next->next; - - if (prev) - prev->next = next; - else - fq->q.fragments = next; - - fq->q.meat -= free_it->len; - frag_kfree_skb(fq->q.net, free_it); - } - } + /* Look for overlap with succeeding segment. 
*/ + if (next && FRAG6_CB(next)->offset < end) + goto discard_fq; FRAG6_CB(skb)->offset = offset; @@ -436,6 +393,8 @@ found: write_unlock(&ip6_frags.lock); return -1; +discard_fq: + fq_kill(fq); err: IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 1dd1affdead..5acb3560ff1 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -111,6 +111,15 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); } +static void udp_v6_rehash(struct sock *sk) +{ + u16 new_hash = udp6_portaddr_hash(sock_net(sk), + &inet6_sk(sk)->rcv_saddr, + inet_sk(sk)->inet_num); + + udp_lib_rehash(sk, new_hash); +} + static inline int compute_score(struct sock *sk, struct net *net, unsigned short hnum, struct in6_addr *saddr, __be16 sport, @@ -1447,6 +1456,7 @@ struct proto udpv6_prot = { .backlog_rcv = udpv6_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, + .rehash = udp_v6_rehash, .get_port = udp_v6_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index a788f9e9427..6130f9d9dbe 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c @@ -1102,7 +1102,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */ le16_to_cpus(&val_len); n+=2; - if (val_len > 1016) { + if (val_len >= 1016) { IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ ); return -RSP_INVALID_COMMAND_FORMAT; } diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 4f8ddba4801..4c2f89df5cc 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -924,6 +924,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, ip_vs_out_stats(cp, skb); ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); + ip_vs_update_conntrack(skb, cp, 0); ip_vs_conn_put(cp); skb->ipvs_property = 1; diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 33b329bfc2d..7e9af5b76d9 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -410,7 +410,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, union nf_inet_addr to; __be16 port; struct ip_vs_conn *n_cp; - struct nf_conn *ct; #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, @@ -497,11 +496,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, ip_vs_control_add(n_cp, cp); } - ct = (struct nf_conn *)skb->nfct; - if (ct && ct != &nf_conntrack_untracked) - ip_vs_expect_related(skb, ct, n_cp, - IPPROTO_TCP, &n_cp->dport, 1); - /* * Move tunnel to listen state */ diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 21e1a5e9b9d..49df6bea6a2 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -349,8 +349,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, } #endif -static void -ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) +void +ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) { struct nf_conn *ct = (struct nf_conn *)skb->nfct; struct nf_conntrack_tuple new_tuple; @@ -365,11 +365,17 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) * real-server we will see RIP->DIP. 
*/ new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; - new_tuple.src.u3 = cp->daddr; + if (outin) + new_tuple.src.u3 = cp->daddr; + else + new_tuple.dst.u3 = cp->vaddr; /* * This will also take care of UDP and other protocols. */ - new_tuple.src.u.tcp.port = cp->dport; + if (outin) + new_tuple.src.u.tcp.port = cp->dport; + else + new_tuple.dst.u.tcp.port = cp->vport; nf_conntrack_alter_reply(ct, &new_tuple); } @@ -428,7 +434,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); - ip_vs_update_conntrack(skb, cp); + ip_vs_update_conntrack(skb, cp, 1); /* FIXME: when application helper enlarges the packet and the length is larger than the MTU of outgoing device, there will be still @@ -506,7 +512,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); - ip_vs_update_conntrack(skb, cp); + ip_vs_update_conntrack(skb, cp, 1); /* FIXME: when application helper enlarges the packet and the length is larger than the MTU of outgoing device, there will be still diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 24b2cd55563..d344dc481cc 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1232,6 +1232,18 @@ out: return 0; } +static bool list_has_sctp_addr(const struct list_head *list, + union sctp_addr *ipaddr) +{ + struct sctp_transport *addr; + + list_for_each_entry(addr, list, transports) { + if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr)) + return true; + } + + return false; +} /* A restart is occurring, check to make sure no new addresses * are being added as we may be under a takeover attack. */ @@ -1240,10 +1252,10 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, struct sctp_chunk *init, sctp_cmd_seq_t *commands) { - struct sctp_transport *new_addr, *addr; - int found; + struct sctp_transport *new_addr; + int ret = 1; - /* Implementor's Guide - Sectin 5.2.2 + /* Implementor's Guide - Section 5.2.2 * ... * Before responding the endpoint MUST check to see if the * unexpected INIT adds new addresses to the association. If new @@ -1254,31 +1266,19 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, /* Search through all current addresses and make sure * we aren't adding any new ones. */ - new_addr = NULL; - found = 0; - list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list, - transports) { - found = 0; - list_for_each_entry(addr, &asoc->peer.transport_addr_list, - transports) { - if (sctp_cmp_addr_exact(&new_addr->ipaddr, - &addr->ipaddr)) { - found = 1; - break; - } - } - if (!found) + transports) { + if (!list_has_sctp_addr(&asoc->peer.transport_addr_list, + &new_addr->ipaddr)) { + sctp_sf_send_restart_abort(&new_addr->ipaddr, init, + commands); + ret = 0; break; - } - - /* If a new address was added, ABORT the sender. */ - if (!found && new_addr) { - sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands); + } } /* Return success if all addresses were found. 
*/ - return found; + return ret; } /* Populate the verification/tie tags based on overlapping INIT diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 36cb66022a2..e9eaaf7d43c 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -38,7 +38,7 @@ static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = { static LIST_HEAD(cred_unused); static unsigned long number_cred_unused; -#define MAX_HASHTABLE_BITS (10) +#define MAX_HASHTABLE_BITS (14) static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp) { unsigned long num; diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index dcfc66bab2b..12c48598281 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -745,17 +745,18 @@ gss_pipe_release(struct inode *inode) struct rpc_inode *rpci = RPC_I(inode); struct gss_upcall_msg *gss_msg; +restart: spin_lock(&inode->i_lock); - while (!list_empty(&rpci->in_downcall)) { + list_for_each_entry(gss_msg, &rpci->in_downcall, list) { - gss_msg = list_entry(rpci->in_downcall.next, - struct gss_upcall_msg, list); + if (!list_empty(&gss_msg->msg.list)) + continue; gss_msg->msg.errno = -EPIPE; atomic_inc(&gss_msg->count); __gss_unhash_msg(gss_msg); spin_unlock(&inode->i_lock); gss_release_msg(gss_msg); - spin_lock(&inode->i_lock); + goto restart; } spin_unlock(&inode->i_lock); diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 03264461052..778e5dfc514 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -237,6 +237,7 @@ get_key(const void *p, const void *end, if (!supported_gss_krb5_enctype(alg)) { printk(KERN_WARNING "gss_kerberos_mech: unsupported " "encryption key algorithm %d\n", alg); + p = ERR_PTR(-EINVAL); goto out_err; } p = simple_get_netobj(p, end, &key); @@ -282,15 +283,19 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) ctx->enctype = ENCTYPE_DES_CBC_RAW; ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); - if (ctx->gk5e == NULL) + if (ctx->gk5e == NULL) { + p = ERR_PTR(-EINVAL); goto out_err; + } /* The downcall format was designed before we completely understood * the uses of the context fields; so it includes some stuff we * just give some minimal sanity-checking, and some we ignore * completely (like the next twenty bytes): */ - if (unlikely(p + 20 > end || p + 20 < p)) + if (unlikely(p + 20 > end || p + 20 < p)) { + p = ERR_PTR(-EFAULT); goto out_err; + } p += 20; p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); if (IS_ERR(p)) @@ -619,6 +624,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, if (ctx->seq_send64 != ctx->seq_send) { dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, (long unsigned)ctx->seq_send64, ctx->seq_send); + p = ERR_PTR(-EINVAL); goto out_err; } p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index dc3f1f5ed86..adade3d313f 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c @@ -100,6 +100,7 @@ gss_import_sec_context_spkm3(const void *p, size_t len, if (version != 1) { dprintk("RPC: unknown spkm3 token format: " "obsolete nfs-utils?\n"); + p = ERR_PTR(-EINVAL); goto out_err_free_ctx; } @@ -135,8 +136,10 @@ gss_import_sec_context_spkm3(const void *p, size_t len, if (IS_ERR(p)) goto out_err_free_intg_alg; - if (p != end) + if (p != end) { + p = ERR_PTR(-EFAULT); goto 
out_err_free_intg_key; + } ctx_id->internal_ctx_id = ctx; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 2388d83b68f..fa5549079d7 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -226,7 +226,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru goto out_no_principal; } - kref_init(&clnt->cl_kref); + atomic_set(&clnt->cl_count, 1); err = rpc_setup_pipedir(clnt, program->pipe_dir_name); if (err < 0) @@ -390,14 +390,14 @@ rpc_clone_client(struct rpc_clnt *clnt) if (new->cl_principal == NULL) goto out_no_principal; } - kref_init(&new->cl_kref); + atomic_set(&new->cl_count, 1); err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); if (err != 0) goto out_no_path; if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); xprt_get(clnt->cl_xprt); - kref_get(&clnt->cl_kref); + atomic_inc(&clnt->cl_count); rpc_register_client(new); rpciod_up(); return new; @@ -465,10 +465,8 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client); * Free an RPC client */ static void -rpc_free_client(struct kref *kref) +rpc_free_client(struct rpc_clnt *clnt) { - struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); - dprintk("RPC: destroying %s client for %s\n", clnt->cl_protname, clnt->cl_server); if (!IS_ERR(clnt->cl_path.dentry)) { @@ -495,12 +493,10 @@ out_free: * Free an RPC client */ static void -rpc_free_auth(struct kref *kref) +rpc_free_auth(struct rpc_clnt *clnt) { - struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); - if (clnt->cl_auth == NULL) { - rpc_free_client(kref); + rpc_free_client(clnt); return; } @@ -509,10 +505,11 @@ rpc_free_auth(struct kref *kref) * release remaining GSS contexts. This mechanism ensures * that it can do so safely. */ - kref_init(kref); + atomic_inc(&clnt->cl_count); rpcauth_release(clnt->cl_auth); clnt->cl_auth = NULL; - kref_put(kref, rpc_free_client); + if (atomic_dec_and_test(&clnt->cl_count)) + rpc_free_client(clnt); } /* @@ -525,7 +522,8 @@ rpc_release_client(struct rpc_clnt *clnt) if (list_empty(&clnt->cl_tasks)) wake_up(&destroy_wait); - kref_put(&clnt->cl_kref, rpc_free_auth); + if (atomic_dec_and_test(&clnt->cl_count)) + rpc_free_auth(clnt); } /** @@ -588,7 +586,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) if (clnt != NULL) { rpc_task_release_client(task); task->tk_client = clnt; - kref_get(&clnt->cl_kref); + atomic_inc(&clnt->cl_count); if (clnt->cl_softrtry) task->tk_flags |= RPC_TASK_SOFT; /* Add to the client's list of all tasks */ @@ -931,7 +929,7 @@ call_reserveresult(struct rpc_task *task) task->tk_status = 0; if (status >= 0) { if (task->tk_rqstp) { - task->tk_action = call_allocate; + task->tk_action = call_refresh; return; } @@ -966,13 +964,54 @@ call_reserveresult(struct rpc_task *task) } /* - * 2. Allocate the buffer. For details, see sched.c:rpc_malloc. + * 2. Bind and/or refresh the credentials + */ +static void +call_refresh(struct rpc_task *task) +{ + dprint_status(task); + + task->tk_action = call_refreshresult; + task->tk_status = 0; + task->tk_client->cl_stats->rpcauthrefresh++; + rpcauth_refreshcred(task); +} + +/* + * 2a. 
Process the results of a credential refresh + */ +static void +call_refreshresult(struct rpc_task *task) +{ + int status = task->tk_status; + + dprint_status(task); + + task->tk_status = 0; + task->tk_action = call_allocate; + if (status >= 0 && rpcauth_uptodatecred(task)) + return; + switch (status) { + case -EACCES: + rpc_exit(task, -EACCES); + return; + case -ENOMEM: + rpc_exit(task, -ENOMEM); + return; + case -ETIMEDOUT: + rpc_delay(task, 3*HZ); + } + task->tk_action = call_refresh; +} + +/* + * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc. * (Note: buffer memory is freed in xprt_release). */ static void call_allocate(struct rpc_task *task) { - unsigned int slack = task->tk_client->cl_auth->au_cslack; + unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack; struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = task->tk_xprt; struct rpc_procinfo *proc = task->tk_msg.rpc_proc; @@ -980,7 +1019,7 @@ call_allocate(struct rpc_task *task) dprint_status(task); task->tk_status = 0; - task->tk_action = call_refresh; + task->tk_action = call_bind; if (req->rq_buffer) return; @@ -1017,47 +1056,6 @@ call_allocate(struct rpc_task *task) rpc_exit(task, -ERESTARTSYS); } -/* - * 2a. Bind and/or refresh the credentials - */ -static void -call_refresh(struct rpc_task *task) -{ - dprint_status(task); - - task->tk_action = call_refreshresult; - task->tk_status = 0; - task->tk_client->cl_stats->rpcauthrefresh++; - rpcauth_refreshcred(task); -} - -/* - * 2b. Process the results of a credential refresh - */ -static void -call_refreshresult(struct rpc_task *task) -{ - int status = task->tk_status; - - dprint_status(task); - - task->tk_status = 0; - task->tk_action = call_bind; - if (status >= 0 && rpcauth_uptodatecred(task)) - return; - switch (status) { - case -EACCES: - rpc_exit(task, -EACCES); - return; - case -ENOMEM: - rpc_exit(task, -ENOMEM); - return; - case -ETIMEDOUT: - rpc_delay(task, 3*HZ); - } - task->tk_action = call_refresh; -} - static inline int rpc_task_need_encode(struct rpc_task *task) { diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 95ccbcf45d3..8c8eef2b8f2 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -48,7 +48,7 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, return; do { msg = list_entry(head->next, struct rpc_pipe_msg, list); - list_del(&msg->list); + list_del_init(&msg->list); msg->errno = err; destroy_msg(msg); } while (!list_empty(head)); @@ -208,7 +208,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp) if (msg != NULL) { spin_lock(&inode->i_lock); msg->errno = -EAGAIN; - list_del(&msg->list); + list_del_init(&msg->list); spin_unlock(&inode->i_lock); rpci->ops->destroy_msg(msg); } @@ -268,7 +268,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) if (res < 0 || msg->len == msg->copied) { filp->private_data = NULL; spin_lock(&inode->i_lock); - list_del(&msg->list); + list_del_init(&msg->list); spin_unlock(&inode->i_lock); rpci->ops->destroy_msg(msg); } @@ -371,21 +371,23 @@ rpc_show_info(struct seq_file *m, void *v) static int rpc_info_open(struct inode *inode, struct file *file) { - struct rpc_clnt *clnt; + struct rpc_clnt *clnt = NULL; int ret = single_open(file, rpc_show_info, NULL); if (!ret) { struct seq_file *m = file->private_data; - mutex_lock(&inode->i_mutex); - clnt = RPC_I(inode)->private; - if (clnt) { - kref_get(&clnt->cl_kref); + + spin_lock(&file->f_path.dentry->d_lock); + if (!d_unhashed(file->f_path.dentry)) + clnt = 
RPC_I(inode)->private; + if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) { + spin_unlock(&file->f_path.dentry->d_lock); m->private = clnt; } else { + spin_unlock(&file->f_path.dentry->d_lock); single_release(inode, file); ret = -EINVAL; } - mutex_unlock(&inode->i_mutex); } return ret; } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 4414a18c63b..0b39b2451ea 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -692,6 +692,7 @@ static int unix_autobind(struct socket *sock) static u32 ordernum = 1; struct unix_address *addr; int err; + unsigned int retries = 0; mutex_lock(&u->readlock); @@ -717,9 +718,17 @@ retry: if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, addr->hash)) { spin_unlock(&unix_table_lock); - /* Sanity yield. It is unusual case, but yet... */ - if (!(ordernum&0xFF)) - yield(); + /* + * __unix_find_socket_byname() may take long time if many names + * are already in use. + */ + cond_resched(); + /* Give up if all names seems to be in use. */ + if (retries++ == 0xFFFFF) { + err = -ENOSPC; + kfree(addr); + goto out; + } goto retry; } addr->hash ^= sk->sk_type; diff --git a/scripts/basic/docproc.c b/scripts/basic/docproc.c index 79ab973fb43..fc3b18d844a 100644 --- a/scripts/basic/docproc.c +++ b/scripts/basic/docproc.c @@ -34,12 +34,14 @@ * */ +#define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <unistd.h> #include <limits.h> +#include <errno.h> #include <sys/types.h> #include <sys/wait.h> @@ -54,6 +56,7 @@ typedef void FILEONLY(char * file); FILEONLY *internalfunctions; FILEONLY *externalfunctions; FILEONLY *symbolsonly; +FILEONLY *findall; typedef void FILELINE(char * file, char * line); FILELINE * singlefunctions; @@ -65,12 +68,30 @@ FILELINE * docsection; #define KERNELDOCPATH "scripts/" #define KERNELDOC "kernel-doc" #define DOCBOOK "-docbook" +#define LIST "-list" #define FUNCTION "-function" #define NOFUNCTION "-nofunction" #define NODOCSECTIONS "-no-doc-sections" static char *srctree, *kernsrctree; +static char **all_list = NULL; +static int all_list_len = 0; + +static void consume_symbol(const char *sym) +{ + int i; + + for (i = 0; i < all_list_len; i++) { + if (!all_list[i]) + continue; + if (strcmp(sym, all_list[i])) + continue; + all_list[i] = NULL; + break; + } +} + static void usage (void) { fprintf(stderr, "Usage: docproc {doc|depend} file\n"); @@ -248,6 +269,7 @@ static void docfunctions(char * filename, char * type) struct symfile * sym = &symfilelist[i]; for (j=0; j < sym->symbolcnt; j++) { vec[idx++] = type; + consume_symbol(sym->symbollist[j].name); vec[idx++] = sym->symbollist[j].name; } } @@ -287,6 +309,11 @@ static void singfunc(char * filename, char * line) vec[idx++] = &line[i]; } } + for (i = 0; i < idx; i++) { + if (strcmp(vec[i], FUNCTION)) + continue; + consume_symbol(vec[i + 1]); + } vec[idx++] = filename; vec[idx] = NULL; exec_kernel_doc(vec); @@ -306,6 +333,10 @@ static void docsect(char *filename, char *line) if (*s == '\n') *s = '\0'; + asprintf(&s, "DOC: %s", line); + consume_symbol(s); + free(s); + vec[0] = KERNELDOC; vec[1] = DOCBOOK; vec[2] = FUNCTION; @@ -315,6 +346,84 @@ static void docsect(char *filename, char *line) exec_kernel_doc(vec); } +static void find_all_symbols(char *filename) +{ + char *vec[4]; /* kerneldoc -list file NULL */ + pid_t pid; + int ret, i, count, start; + char real_filename[PATH_MAX + 1]; + int pipefd[2]; + char *data, *str; + size_t data_len = 0; + + vec[0] = KERNELDOC; + vec[1] = LIST; + vec[2] = filename; + 
vec[3] = NULL; + + if (pipe(pipefd)) { + perror("pipe"); + exit(1); + } + + switch (pid=fork()) { + case -1: + perror("fork"); + exit(1); + case 0: + close(pipefd[0]); + dup2(pipefd[1], 1); + memset(real_filename, 0, sizeof(real_filename)); + strncat(real_filename, kernsrctree, PATH_MAX); + strncat(real_filename, "/" KERNELDOCPATH KERNELDOC, + PATH_MAX - strlen(real_filename)); + execvp(real_filename, vec); + fprintf(stderr, "exec "); + perror(real_filename); + exit(1); + default: + close(pipefd[1]); + data = malloc(4096); + do { + while ((ret = read(pipefd[0], + data + data_len, + 4096)) > 0) { + data_len += ret; + data = realloc(data, data_len + 4096); + } + } while (ret == -EAGAIN); + if (ret != 0) { + perror("read"); + exit(1); + } + waitpid(pid, &ret ,0); + } + if (WIFEXITED(ret)) + exitstatus |= WEXITSTATUS(ret); + else + exitstatus = 0xff; + + count = 0; + /* poor man's strtok, but with counting */ + for (i = 0; i < data_len; i++) { + if (data[i] == '\n') { + count++; + data[i] = '\0'; + } + } + start = all_list_len; + all_list_len += count; + all_list = realloc(all_list, sizeof(char *) * all_list_len); + str = data; + for (i = 0; i < data_len && start != all_list_len; i++) { + if (data[i] == '\0') { + all_list[start] = str; + str = data + i + 1; + start++; + } + } +} + /* * Parse file, calling action specific functions for: * 1) Lines containing !E @@ -322,7 +431,8 @@ static void docsect(char *filename, char *line) * 3) Lines containing !D * 4) Lines containing !F * 5) Lines containing !P - * 6) Default lines - lines not matching the above + * 6) Lines containing !C + * 7) Default lines - lines not matching the above */ static void parse_file(FILE *infile) { @@ -365,6 +475,12 @@ static void parse_file(FILE *infile) s++; docsection(line + 2, s); break; + case 'C': + while (*s && !isspace(*s)) s++; + *s = '\0'; + if (findall) + findall(line+2); + break; default: defaultline(line); } @@ -380,6 +496,7 @@ static void parse_file(FILE *infile) int main(int argc, char *argv[]) { FILE * infile; + int i; srctree = getenv("SRCTREE"); if (!srctree) @@ -415,6 +532,7 @@ int main(int argc, char *argv[]) symbolsonly = find_export_symbols; singlefunctions = noaction2; docsection = noaction2; + findall = find_all_symbols; parse_file(infile); /* Rewind to start from beginning of file again */ @@ -425,8 +543,16 @@ int main(int argc, char *argv[]) symbolsonly = printline; singlefunctions = singfunc; docsection = docsect; + findall = NULL; parse_file(infile); + + for (i = 0; i < all_list_len; i++) { + if (!all_list[i]) + continue; + fprintf(stderr, "Warning: didn't use docs for %s\n", + all_list[i]); + } } else if (strcmp("depend", argv[1]) == 0) { @@ -439,6 +565,7 @@ int main(int argc, char *argv[]) symbolsonly = adddep; singlefunctions = adddep2; docsection = adddep2; + findall = adddep; parse_file(infile); printf("\n"); } diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 102e1235fd5..cdb6dc1f645 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -44,12 +44,13 @@ use strict; # Note: This only supports 'c'. # usage: -# kernel-doc [ -docbook | -html | -text | -man ] [ -no-doc-sections ] +# kernel-doc [ -docbook | -html | -text | -man | -list ] [ -no-doc-sections ] # [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile # or # [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile # # Set output format using one of -docbook -html -text or -man. Default is man. +# The -list format is for internal use by docproc. 
# # -no-doc-sections # Do not output DOC: sections @@ -210,9 +211,16 @@ my %highlights_text = ( $type_constant, "\$1", $type_param, "\$1" ); my $blankline_text = ""; +# list mode +my %highlights_list = ( $type_constant, "\$1", + $type_func, "\$1", + $type_struct, "\$1", + $type_param, "\$1" ); +my $blankline_list = ""; sub usage { - print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man ] [ -no-doc-sections ]\n"; + print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man | -list ]\n"; + print " [ -no-doc-sections ]\n"; print " [ -function funcname [ -function funcname ...] ]\n"; print " [ -nofunction funcname [ -nofunction funcname ...] ]\n"; print " c source file(s) > outputfile\n"; @@ -318,6 +326,10 @@ while ($ARGV[0] =~ m/^-(.*)/) { $output_mode = "xml"; %highlights = %highlights_xml; $blankline = $blankline_xml; + } elsif ($cmd eq "-list") { + $output_mode = "list"; + %highlights = %highlights_list; + $blankline = $blankline_list; } elsif ($cmd eq "-gnome") { $output_mode = "gnome"; %highlights = %highlights_gnome; @@ -1361,6 +1373,42 @@ sub output_blockhead_text(%) { } } +## list mode output functions + +sub output_function_list(%) { + my %args = %{$_[0]}; + + print $args{'function'} . "\n"; +} + +# output enum in list +sub output_enum_list(%) { + my %args = %{$_[0]}; + print $args{'enum'} . "\n"; +} + +# output typedef in list +sub output_typedef_list(%) { + my %args = %{$_[0]}; + print $args{'typedef'} . "\n"; +} + +# output struct as list +sub output_struct_list(%) { + my %args = %{$_[0]}; + + print $args{'struct'} . "\n"; +} + +sub output_blockhead_list(%) { + my %args = %{$_[0]}; + my ($parameter, $section); + + foreach $section (@{$args{'sectionlist'}}) { + print "DOC: $section\n"; + } +} + ## # generic output function for all types (function, struct/union, typedef, enum); # calls the generated, variable output_ function name based on @@ -1679,7 +1727,7 @@ sub check_sections($$$$$$) { foreach $px (0 .. 
$#prms) { $prm_clean = $prms[$px]; $prm_clean =~ s/\[.*\]//; - $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//; + $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i; # ignore array size in a parameter string; # however, the original param string may contain # spaces, e.g.: addr[6 + 2] diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index b2b0998d6ab..60924f6a52d 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -1272,6 +1272,7 @@ long keyctl_session_to_parent(void) keyring_r = NULL; me = current; + rcu_read_lock(); write_lock_irq(&tasklist_lock); parent = me->real_parent; @@ -1304,7 +1305,8 @@ long keyctl_session_to_parent(void) goto not_permitted; /* the keyrings must have the same UID */ - if (pcred->tgcred->session_keyring->uid != mycred->euid || + if ((pcred->tgcred->session_keyring && + pcred->tgcred->session_keyring->uid != mycred->euid) || mycred->tgcred->session_keyring->uid != mycred->euid) goto not_permitted; @@ -1319,6 +1321,7 @@ long keyctl_session_to_parent(void) set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); write_unlock_irq(&tasklist_lock); + rcu_read_unlock(); if (oldcred) put_cred(oldcred); return 0; @@ -1327,6 +1330,7 @@ already_same: ret = 0; not_permitted: write_unlock_irq(&tasklist_lock); + rcu_read_unlock(); put_cred(cred); return ret; diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index eb68326c37d..a7868ad4d53 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -829,6 +829,8 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card, if (get_user(device, (int __user *)argp)) return -EFAULT; + if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */ + device = SNDRV_RAWMIDI_DEVICES - 1; mutex_lock(®ister_mutex); device = device < 0 ? 
0 : device + 1; while (device < SNDRV_RAWMIDI_DEVICES) { diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c index 685712276ac..69cd7b3c362 100644 --- a/sound/core/seq/oss/seq_oss_init.c +++ b/sound/core/seq/oss/seq_oss_init.c @@ -281,13 +281,10 @@ snd_seq_oss_open(struct file *file, int level) return 0; _error: - snd_seq_oss_writeq_delete(dp->writeq); - snd_seq_oss_readq_delete(dp->readq); snd_seq_oss_synth_cleanup(dp); snd_seq_oss_midi_cleanup(dp); - delete_port(dp); delete_seq_queue(dp->queue); - kfree(dp); + delete_port(dp); return rc; } @@ -350,8 +347,10 @@ create_port(struct seq_oss_devinfo *dp) static int delete_port(struct seq_oss_devinfo *dp) { - if (dp->port < 0) + if (dp->port < 0) { + kfree(dp); return 0; + } debug_printk(("delete_port %i\n", dp->port)); return snd_seq_event_port_detach(dp->cseq, dp->port); diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c index 5f3e68401f9..91d6023a63e 100644 --- a/sound/isa/msnd/msnd_pinnacle.c +++ b/sound/isa/msnd/msnd_pinnacle.c @@ -764,9 +764,9 @@ static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; +#ifndef MSND_CLASSIC static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; -#ifndef MSND_CLASSIC /* Extra Peripheral Configuration (Default: Disable) */ static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; @@ -894,7 +894,11 @@ static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx) struct snd_card *card; struct snd_msnd *chip; - if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) { + if (has_isapnp(idx) +#ifndef MSND_CLASSIC + || cfg[idx] == SNDRV_AUTO_PORT +#endif + ) { printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); return -ENODEV; } diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 3827092cc1d..14829210ef0 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -4536,7 +4536,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec, cfg->hp_outs--; memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1, sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i)); - memmove(sequences_hp + i - 1, sequences_hp + i, + memmove(sequences_hp + i, sequences_hp + i + 1, sizeof(sequences_hp[0]) * (cfg->hp_outs - i)); } } diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 4ef5efaaaef..488fd9ade1b 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -972,6 +972,53 @@ static struct hda_verb cs_coef_init_verbs[] = { {} /* terminator */ }; +/* Errata: CS4207 rev C0/C1/C2 Silicon + * + * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf + * + * 6. At high temperature (TA > +85°C), the digital supply current (IVD) + * may be excessive (up to an additional 200 μA), which is most easily + * observed while the part is being held in reset (RESET# active low). + * + * Root Cause: At initial powerup of the device, the logic that drives + * the clock and write enable to the S/PDIF SRC RAMs is not properly + * initialized. + * Certain random patterns will cause a steady leakage current in those + * RAM cells. The issue will resolve once the SRCs are used (turned on). + * + * Workaround: The following verb sequence briefly turns on the S/PDIF SRC + * blocks, which will alleviate the issue. 
+ */ + +static struct hda_verb cs_errata_init_verbs[] = { + {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */ + {0x11, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */ + + {0x11, AC_VERB_SET_COEF_INDEX, 0x0008}, + {0x11, AC_VERB_SET_PROC_COEF, 0x9999}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0017}, + {0x11, AC_VERB_SET_PROC_COEF, 0xa412}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0001}, + {0x11, AC_VERB_SET_PROC_COEF, 0x0009}, + + {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */ + {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */ + + {0x11, AC_VERB_SET_COEF_INDEX, 0x0017}, + {0x11, AC_VERB_SET_PROC_COEF, 0x2412}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0008}, + {0x11, AC_VERB_SET_PROC_COEF, 0x0000}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0001}, + {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, + {0x11, AC_VERB_SET_PROC_STATE, 0x00}, + + {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ + {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ + /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ + + {} /* terminator */ +}; + /* SPDIF setup */ static void init_digital(struct hda_codec *codec) { @@ -991,6 +1038,9 @@ static int cs_init(struct hda_codec *codec) { struct cs_spec *spec = codec->spec; + /* init_verb sequence for C0/C1/C2 errata*/ + snd_hda_sequence_write(codec, cs_errata_init_verbs); + snd_hda_sequence_write(codec, cs_coef_init_verbs); if (spec->gpio_mask) { diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 5cdb80edbd7..71f9d6475b0 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -116,6 +116,7 @@ struct conexant_spec { unsigned int dell_vostro:1; unsigned int ideapad:1; unsigned int thinkpad:1; + unsigned int hp_laptop:1; unsigned int ext_mic_present; unsigned int recording; @@ -2299,6 +2300,18 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec) } } +/* toggle input of built-in digital mic and mic jack appropriately */ +static void cxt5066_hp_laptop_automic(struct hda_codec *codec) +{ + unsigned int present; + + present = snd_hda_jack_detect(codec, 0x1b); + snd_printdd("CXT5066: external microphone present=%d\n", present); + snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL, + present ? 
1 : 3); +} + + /* toggle input of built-in digital mic and mic jack appropriately order is: external mic -> dock mic -> interal mic */ static void cxt5066_thinkpad_automic(struct hda_codec *codec) @@ -2408,6 +2421,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res) } /* unsolicited event for jack sensing */ +static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res) +{ + snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26); + switch (res >> 26) { + case CONEXANT_HP_EVENT: + cxt5066_hp_automute(codec); + break; + case CONEXANT_MIC_EVENT: + cxt5066_hp_laptop_automic(codec); + break; + } +} + +/* unsolicited event for jack sensing */ static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res) { snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26); @@ -2989,6 +3016,14 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = { { } /* end */ }; + +static struct hda_verb cxt5066_init_verbs_hp_laptop[] = { + {0x14, AC_VERB_SET_CONNECT_SEL, 0x0}, + {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT}, + {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT}, + { } /* end */ +}; + /* initialize jack-sensing, too */ static int cxt5066_init(struct hda_codec *codec) { @@ -3004,6 +3039,8 @@ static int cxt5066_init(struct hda_codec *codec) cxt5066_ideapad_automic(codec); else if (spec->thinkpad) cxt5066_thinkpad_automic(codec); + else if (spec->hp_laptop) + cxt5066_hp_laptop_automic(codec); } cxt5066_set_mic_boost(codec); return 0; @@ -3031,6 +3068,7 @@ enum { CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */ CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */ CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */ + CXT5066_HP_LAPTOP, /* HP Laptop */ CXT5066_MODELS }; @@ -3041,6 +3079,7 @@ static const char *cxt5066_models[CXT5066_MODELS] = { [CXT5066_DELL_VOSTO] = "dell-vostro", [CXT5066_IDEAPAD] = "ideapad", [CXT5066_THINKPAD] = "thinkpad", + [CXT5066_HP_LAPTOP] = "hp-laptop", }; static struct snd_pci_quirk cxt5066_cfg_tbl[] = { @@ -3052,8 +3091,10 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO), SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), + SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), @@ -3116,6 +3157,23 @@ static int patch_cxt5066(struct hda_codec *codec) spec->num_init_verbs++; spec->dell_automute = 1; break; + case CXT5066_HP_LAPTOP: + codec->patch_ops.init = cxt5066_init; + codec->patch_ops.unsol_event = cxt5066_hp_laptop_event; + spec->init_verbs[spec->num_init_verbs] = + cxt5066_init_verbs_hp_laptop; + spec->num_init_verbs++; + spec->hp_laptop = 1; + spec->mixers[spec->num_mixers++] = cxt5066_mixer_master; + spec->mixers[spec->num_mixers++] = cxt5066_mixers; + /* no S/PDIF out */ + spec->multiout.dig_out_nid = 0; + /* input source automatically selected */ + spec->input_mux = NULL; + spec->port_d_mode = 0; + spec->mic_boost = 3; /* default 30dB gain 
*/ + break; + case CXT5066_OLPC_XO_1_5: codec->patch_ops.init = cxt5066_olpc_init; codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 627bf996336..bcbf9160ed8 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5334,6 +5334,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids, static struct snd_pci_quirk beep_white_list[] = { SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), + SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1), SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), {} }; diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h index 6147216af74..a3409edcfb5 100644 --- a/sound/pci/oxygen/oxygen.h +++ b/sound/pci/oxygen/oxygen.h @@ -155,6 +155,7 @@ void oxygen_pci_remove(struct pci_dev *pci); int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state); int oxygen_pci_resume(struct pci_dev *pci); #endif +void oxygen_pci_shutdown(struct pci_dev *pci); /* oxygen_mixer.c */ diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c index fad03d64e3a..7e93cf88443 100644 --- a/sound/pci/oxygen/oxygen_lib.c +++ b/sound/pci/oxygen/oxygen_lib.c @@ -519,16 +519,21 @@ static void oxygen_init(struct oxygen *chip) } } -static void oxygen_card_free(struct snd_card *card) +static void oxygen_shutdown(struct oxygen *chip) { - struct oxygen *chip = card->private_data; - spin_lock_irq(&chip->reg_lock); chip->interrupt_mask = 0; chip->pcm_running = 0; oxygen_write16(chip, OXYGEN_DMA_STATUS, 0); oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0); spin_unlock_irq(&chip->reg_lock); +} + +static void oxygen_card_free(struct snd_card *card) +{ + struct oxygen *chip = card->private_data; + + oxygen_shutdown(chip); if (chip->irq >= 0) free_irq(chip->irq, chip); flush_scheduled_work(); @@ -778,3 +783,13 @@ int oxygen_pci_resume(struct pci_dev *pci) } EXPORT_SYMBOL(oxygen_pci_resume); #endif /* CONFIG_PM */ + +void oxygen_pci_shutdown(struct pci_dev *pci) +{ + struct snd_card *card = pci_get_drvdata(pci); + struct oxygen *chip = card->private_data; + + oxygen_shutdown(chip); + chip->model.cleanup(chip); +} +EXPORT_SYMBOL(oxygen_pci_shutdown); diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c index f03a2f2cffe..06c863e86e3 100644 --- a/sound/pci/oxygen/virtuoso.c +++ b/sound/pci/oxygen/virtuoso.c @@ -95,6 +95,7 @@ static struct pci_driver xonar_driver = { .suspend = oxygen_pci_suspend, .resume = oxygen_pci_resume, #endif + .shutdown = oxygen_pci_shutdown, }; static int __init alsa_card_xonar_init(void) diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c index dbc4b89d74e..b82c1cfa96f 100644 --- a/sound/pci/oxygen/xonar_wm87x6.c +++ b/sound/pci/oxygen/xonar_wm87x6.c @@ -53,6 +53,8 @@ struct xonar_wm87x6 { struct xonar_generic generic; u16 wm8776_regs[0x17]; u16 wm8766_regs[0x10]; + struct snd_kcontrol *line_adcmux_control; + struct snd_kcontrol *mic_adcmux_control; struct snd_kcontrol *lc_controls[13]; }; @@ -193,6 +195,7 @@ static void xonar_ds_init(struct oxygen *chip) static void xonar_ds_cleanup(struct oxygen *chip) { xonar_disable_output(chip); + wm8776_write(chip, WM8776_RESET, 0); } static void xonar_ds_suspend(struct oxygen *chip) @@ -603,6 +606,7 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl, { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; + struct snd_kcontrol *other_ctl; unsigned int mux_bit = ctl->private_value; u16 reg; int changed; @@ -610,8 +614,18 
@@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl, mutex_lock(&chip->mutex); reg = data->wm8776_regs[WM8776_ADCMUX]; if (value->value.integer.value[0]) { - reg &= ~0x003; reg |= mux_bit; + /* line-in and mic-in are exclusive */ + mux_bit ^= 3; + if (reg & mux_bit) { + reg &= ~mux_bit; + if (mux_bit == 1) + other_ctl = data->line_adcmux_control; + else + other_ctl = data->mic_adcmux_control; + snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, + &other_ctl->id); + } } else reg &= ~mux_bit; changed = reg != data->wm8776_regs[WM8776_ADCMUX]; @@ -963,7 +977,13 @@ static int xonar_ds_mixer_init(struct oxygen *chip) err = snd_ctl_add(chip->card, ctl); if (err < 0) return err; + if (!strcmp(ctl->id.name, "Line Capture Switch")) + data->line_adcmux_control = ctl; + else if (!strcmp(ctl->id.name, "Mic Capture Switch")) + data->mic_adcmux_control = ctl; } + if (!data->line_adcmux_control || !data->mic_adcmux_control) + return -ENXIO; BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls)); for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { ctl = snd_ctl_new1(&lc_controls[i], chip); diff --git a/sound/usb/card.c b/sound/usb/card.c index 9feb00c831a..4eabafa5b03 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -126,7 +126,7 @@ static void snd_usb_stream_disconnect(struct list_head *head) for (idx = 0; idx < 2; idx++) { subs = &as->substream[idx]; if (!subs->num_formats) - return; + continue; snd_usb_release_substream_urbs(subs, 1); subs->interface = -1; } @@ -216,6 +216,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) } switch (protocol) { + default: + snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n", + protocol); + /* fall through */ + case UAC_VERSION_1: { struct uac1_ac_header_descriptor *h1 = control_header; @@ -253,10 +258,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) break; } - - default: - snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol); - return -EINVAL; } return 0; @@ -465,7 +466,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev, goto __error; } - chip->ctrl_intf = alts; + /* + * For devices with more than one control interface, we assume the + * first contains the audio controls. We might need a more specific + * check here in the future. 
+ */ + if (!chip->ctrl_intf) + chip->ctrl_intf = alts; if (err > 0) { /* create normal USB audio interfaces */ diff --git a/sound/usb/clock.c b/sound/usb/clock.c index b853f8df794..7754a103454 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -295,12 +295,11 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface, switch (altsd->bInterfaceProtocol) { case UAC_VERSION_1: + default: return set_sample_rate_v1(chip, iface, alts, fmt, rate); case UAC_VERSION_2: return set_sample_rate_v2(chip, iface, alts, fmt, rate); } - - return -EINVAL; } diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 1a701f1e8f5..ef0a07e3484 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -275,6 +275,12 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) /* get audio formats */ switch (protocol) { + default: + snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n", + dev->devnum, iface_no, altno, protocol); + protocol = UAC_VERSION_1; + /* fall through */ + case UAC_VERSION_1: { struct uac1_as_header_descriptor *as = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); @@ -336,11 +342,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) dev->devnum, iface_no, altno, as->bTerminalLink); continue; } - - default: - snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n", - dev->devnum, iface_no, altno, protocol); - continue; } /* get format type */ diff --git a/sound/usb/format.c b/sound/usb/format.c index 3a1375459c0..69148212aa7 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -49,7 +49,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, u64 pcm_formats; switch (protocol) { - case UAC_VERSION_1: { + case UAC_VERSION_1: + default: { struct uac_format_type_i_discrete_descriptor *fmt = _fmt; sample_width = fmt->bBitResolution; sample_bytes = fmt->bSubframeSize; @@ -64,9 +65,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, format <<= 1; break; } - - default: - return -EINVAL; } pcm_formats = 0; @@ -384,6 +382,10 @@ static int parse_audio_format_i(struct snd_usb_audio *chip, * audio class v2 uses class specific EP0 range requests for that. 
*/ switch (protocol) { + default: + snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n", + chip->dev->devnum, fp->iface, fp->altsetting, protocol); + /* fall through */ case UAC_VERSION_1: fp->channels = fmt->bNrChannels; ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7); @@ -392,10 +394,6 @@ static int parse_audio_format_i(struct snd_usb_audio *chip, /* fp->channels is already set in this case */ ret = parse_audio_format_rates_v2(chip, fp); break; - default: - snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n", - chip->dev->devnum, fp->iface, fp->altsetting, protocol); - return -EINVAL; } if (fp->channels < 1) { @@ -438,6 +436,10 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip, fp->channels = 1; switch (protocol) { + default: + snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n", + chip->dev->devnum, fp->iface, fp->altsetting, protocol); + /* fall through */ case UAC_VERSION_1: { struct uac_format_type_ii_discrete_descriptor *fmt = _fmt; brate = le16_to_cpu(fmt->wMaxBitRate); @@ -456,10 +458,6 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip, ret = parse_audio_format_rates_v2(chip, fp); break; } - default: - snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n", - chip->dev->devnum, fp->iface, fp->altsetting, protocol); - return -EINVAL; } return ret; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index c166db0057d..3ed3901369c 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -2175,7 +2175,15 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif, } host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; - mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol; + switch (get_iface_desc(host_iface)->bInterfaceProtocol) { + case UAC_VERSION_1: + default: + mixer->protocol = UAC_VERSION_1; + break; + case UAC_VERSION_2: + mixer->protocol = UAC_VERSION_2; + break; + } if ((err = snd_usb_mixer_controls(mixer)) < 0 || (err = snd_usb_mixer_status_create(mixer)) < 0) diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 3634cedf930..3b5135c9306 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -173,13 +173,12 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, switch (altsd->bInterfaceProtocol) { case UAC_VERSION_1: + default: return init_pitch_v1(chip, iface, alts, fmt); case UAC_VERSION_2: return init_pitch_v2(chip, iface, alts, fmt); } - - return -EINVAL; } /* diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index e72f05c3bef..fcc16e4349d 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -1539,6 +1539,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev, goto error; } tev->point.offset = pev->point.offset; + tev->point.retprobe = pev->point.retprobe; tev->nargs = pev->nargs; if (tev->nargs) { tev->args = zalloc(sizeof(struct probe_trace_arg) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 525136684d4..32b81f707ff 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -686,6 +686,25 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) char buf[32], *ptr; int ret, nscopes; + if (!is_c_varname(pf->pvar->var)) { + /* Copy raw parameters */ + pf->tvar->value = strdup(pf->pvar->var); + if (pf->tvar->value == NULL) + return -ENOMEM; + if (pf->pvar->type) { + pf->tvar->type = strdup(pf->pvar->type); + if (pf->tvar->type == NULL) + return -ENOMEM; + } + if 
(pf->pvar->name) { + pf->tvar->name = strdup(pf->pvar->name); + if (pf->tvar->name == NULL) + return -ENOMEM; + } else + pf->tvar->name = NULL; + return 0; + } + if (pf->pvar->name) pf->tvar->name = strdup(pf->pvar->name); else { @@ -700,19 +719,6 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) if (pf->tvar->name == NULL) return -ENOMEM; - if (!is_c_varname(pf->pvar->var)) { - /* Copy raw parameters */ - pf->tvar->value = strdup(pf->pvar->var); - if (pf->tvar->value == NULL) - return -ENOMEM; - if (pf->pvar->type) { - pf->tvar->type = strdup(pf->pvar->type); - if (pf->tvar->type == NULL) - return -ENOMEM; - } - return 0; - } - pr_debug("Searching '%s' variable in context.\n", pf->pvar->var); /* Search child die for local variables and parameters. */ @@ -783,6 +789,16 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf) /* This function has no name. */ tev->point.offset = (unsigned long)pf->addr; + /* Return probe must be on the head of a subprogram */ + if (pf->pev->point.retprobe) { + if (tev->point.offset != 0) { + pr_warning("Return probe must be on the head of" + " a real function\n"); + return -EINVAL; + } + tev->point.retprobe = true; + } + pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, tev->point.offset); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 1a367734e01..b2f5ae97f33 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -2268,6 +2268,9 @@ static int setup_list(struct strlist **list, const char *list_str, int symbol__init(void) { + if (symbol_conf.initialized) + return 0; + elf_version(EV_CURRENT); if (symbol_conf.sort_by_name) symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - @@ -2293,6 +2296,7 @@ int symbol__init(void) symbol_conf.sym_list_str, "symbol") < 0) goto out_free_comm_list; + symbol_conf.initialized = true; return 0; out_free_dso_list: @@ -2304,11 +2308,14 @@ out_free_comm_list: void symbol__exit(void) { + if (!symbol_conf.initialized) + return; strlist__delete(symbol_conf.sym_list); strlist__delete(symbol_conf.dso_list); strlist__delete(symbol_conf.comm_list); vmlinux_path__exit(); symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; + symbol_conf.initialized = false; } int machines__create_kernel_maps(struct rb_root *self, pid_t pid) diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index b7a8da4af5a..ea95c2756f0 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -69,7 +69,8 @@ struct symbol_conf { show_nr_samples, use_callchain, exclude_other, - show_cpu_utilization; + show_cpu_utilization, + initialized; const char *vmlinux_name, *source_prefix, *field_sep; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index b78b794c103..d4853a54771 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1958,10 +1958,10 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, cpu); hardware_disable(NULL); break; - case CPU_ONLINE: + case CPU_STARTING: printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", cpu); - smp_call_function_single(cpu, hardware_enable, NULL, 1); + hardware_enable(NULL); break; } return NOTIFY_OK; @@ -2096,7 +2096,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, static struct notifier_block kvm_cpu_notifier = { .notifier_call = kvm_cpu_hotplug, - .priority = 20, /* must be > scheduler priority */ }; static int vm_stat_get(void *_offset, u64 *val) |
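
The sm_statefuns.c hunk above replaces a nested found-flag loop with a small list_has_sctp_addr() helper, so the restart check reads as "for each new address, is it already known?". A minimal userspace sketch of the same factoring, with plain string arrays standing in for the kernel's transport lists (all names here are illustrative, not SCTP API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Does the address list already contain this address? */
static bool list_has_addr(const char **list, int n, const char *addr)
{
	int i;

	for (i = 0; i < n; i++)
		if (strcmp(list[i], addr) == 0)
			return true;
	return false;
}

/* Return 1 if the restarted peer adds no new address, 0 otherwise. */
static int check_restart_addrs(const char **cur, int ncur,
			       const char **new, int nnew)
{
	int i;

	for (i = 0; i < nnew; i++) {
		if (!list_has_addr(cur, ncur, new[i])) {
			printf("new address %s -> abort the restart\n", new[i]);
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	const char *cur[] = { "10.0.0.1", "10.0.0.2" };
	const char *new[] = { "10.0.0.2", "10.0.0.9" };

	printf("ok=%d\n", check_restart_addrs(cur, 2, new, 2));
	return 0;
}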
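
The gss_pipe_release() change above switches from popping the head of in_downcall to a scan that restarts from the top whenever the lock has to be dropped to process an entry. A rough userspace approximation of that restart pattern, using a pthread mutex and a hand-rolled list; the handled flag merely plays the role of the kernel's "already taken off the list" check:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct msg {
	struct msg *next;
	bool handled;			/* stands in for "already unhashed" */
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Work that must not run under the lock (the real code may sleep here). */
static void release_msg(struct msg *m)
{
	printf("releasing msg %d\n", m->id);
}

static void flush_all(struct msg *head)
{
	struct msg *m;

restart:
	pthread_mutex_lock(&list_lock);
	for (m = head; m; m = m->next) {
		if (m->handled)
			continue;	/* finished by an earlier pass */
		m->handled = true;	/* mark under the lock, like __gss_unhash_msg() */
		pthread_mutex_unlock(&list_lock);
		release_msg(m);
		goto restart;		/* iterator is stale once the lock was dropped */
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct msg c = { NULL, false, 3 };
	struct msg b = { &c, false, 2 };
	struct msg a = { &b, false, 1 };

	flush_all(&a);
	return 0;
}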
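
The clnt.c and rpc_pipe.c hunks replace struct kref with a bare atomic counter so that rpc_info_open() can take a reference with an atomic_inc_not_zero()-style "only if the client is still live" test under d_lock. A compact C11 sketch of that pattern; object_get_not_zero() and object_put() are invented names, not SUNRPC API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	atomic_int count;		/* once it hits 0 the object is dead */
	const char *name;
};

/* Take a reference only if the count has not already dropped to zero. */
static bool object_get_not_zero(struct object *obj)
{
	int old = atomic_load(&obj->count);

	while (old != 0)
		if (atomic_compare_exchange_weak(&obj->count, &old, old + 1))
			return true;	/* on failure, old is reloaded */
	return false;
}

/* Drop a reference; the final put frees the object. */
static void object_put(struct object *obj)
{
	if (atomic_fetch_sub(&obj->count, 1) == 1) {
		printf("freeing %s\n", obj->name);
		free(obj);
	}
}

int main(void)
{
	struct object *obj = malloc(sizeof(*obj));

	atomic_init(&obj->count, 1);
	obj->name = "demo";

	if (object_get_not_zero(obj))	/* e.g. a /proc reader pinning the client */
		object_put(obj);
	object_put(obj);		/* original reference; frees the object */
	return 0;
}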
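
The unix_autobind() change swaps the unconditional yield() for cond_resched() and, more importantly, caps the number of name-collision retries so the loop can fail with -ENOSPC instead of spinning forever. A userspace sketch of that bounded-retry shape; name_in_use() and the retry budget are placeholders, and sched_yield() is only a loose stand-in for cond_resched():

#include <errno.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 0xFFFFF		/* give-up threshold, as in the patch */

/* Pretend the first few candidate names are already bound. */
static bool name_in_use(unsigned int candidate)
{
	return candidate < 5;
}

static int autobind(unsigned int *out)
{
	static unsigned int ordernum = 1;
	unsigned int retries = 0;

	for (;;) {
		unsigned int candidate = ordernum++;

		if (!name_in_use(candidate)) {
			*out = candidate;
			return 0;
		}
		sched_yield();		/* let others run between attempts */
		if (retries++ == MAX_RETRIES)
			return -ENOSPC;	/* all names appear to be taken */
	}
}

int main(void)
{
	unsigned int name;

	if (autobind(&name) == 0)
		printf("bound to name %u\n", name);
	return 0;
}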
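
find_all_symbols() in docproc.c forks kernel-doc with the new -list option, redirects the child's stdout into a pipe, reads everything into a growing buffer and splits it on newlines. The standalone sketch below reproduces that capture pattern; it execs /bin/ls purely as a stand-in for scripts/kernel-doc, and error handling is trimmed for brevity:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	int pipefd[2];
	pid_t pid;
	char *data = NULL, *line;
	size_t data_len = 0, cap = 0;
	ssize_t n;

	if (pipe(pipefd)) {
		perror("pipe");
		return 1;
	}

	pid = fork();
	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0) {				/* child: stdout -> pipe, then exec */
		close(pipefd[0]);
		dup2(pipefd[1], 1);
		execlp("/bin/ls", "ls", (char *)NULL);
		perror("execlp");
		_exit(1);
	}

	close(pipefd[1]);			/* parent: read the child's output until EOF */
	for (;;) {
		if (data_len + 4096 > cap) {
			cap += 4096;
			data = realloc(data, cap);
		}
		n = read(pipefd[0], data + data_len, 4096);
		if (n <= 0)
			break;
		data_len += n;
	}
	waitpid(pid, NULL, 0);
	data[data_len] = '\0';

	/* one entry per line, like the -list symbol dump */
	for (line = strtok(data, "\n"); line; line = strtok(NULL, "\n"))
		printf("got: %s\n", line);

	free(data);
	return 0;
}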
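
The xonar_wm87x6.c hunk makes the line-in and mic-in capture switches mutually exclusive: the two sources sit on bits 0 and 1 of the ADC mux register, so XORing the selected bit with 3 yields the sibling bit that has to be cleared (and whose control gets a change notification). A toy model of that logic, with the register reduced to a plain variable:

#include <stdio.h>

static unsigned int adcmux;		/* stand-in for the WM8776_ADCMUX register */

static void select_input(unsigned int mux_bit, int on)
{
	if (on) {
		adcmux |= mux_bit;
		mux_bit ^= 3;		/* the sibling source's bit (1 <-> 2) */
		if (adcmux & mux_bit) {
			adcmux &= ~mux_bit;
			/* the driver also notifies the other kcontrol here */
			printf("switched the other input off (bit %u)\n", mux_bit);
		}
	} else {
		adcmux &= ~mux_bit;
	}
	printf("ADCMUX is now 0x%x\n", adcmux);
}

int main(void)
{
	select_input(1, 1);	/* line-in on             -> 0x1 */
	select_input(2, 1);	/* mic-in on, line-in off -> 0x2 */
	select_input(2, 0);	/* mic-in off             -> 0x0 */
	return 0;
}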
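
Across sound/usb the patches stop returning -EINVAL for an unknown bInterfaceProtocol and instead log it and fall through to the UAC v1 handling, so not-quite-compliant devices still probe. A self-contained sketch of that switch shape; the protocol constants and the parse functions are placeholders, not the driver's real entry points:

#include <stdio.h>

enum { UAC_VERSION_1 = 0x00, UAC_VERSION_2 = 0x20 };

static int parse_v1(void) { printf("parsing as UAC v1\n"); return 0; }
static int parse_v2(void) { printf("parsing as UAC v2\n"); return 0; }

static int parse_interface(int protocol)
{
	switch (protocol) {
	default:
		fprintf(stderr, "unknown interface protocol %#02x, assuming v1\n",
			protocol);
		/* fall through */
	case UAC_VERSION_1:
		return parse_v1();
	case UAC_VERSION_2:
		return parse_v2();
	}
}

int main(void)
{
	parse_interface(UAC_VERSION_2);		/* handled as v2 */
	parse_interface(0x42);			/* unknown: warned, handled as v1 */
	return 0;
}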