-rw-r--r--  Documentation/virtual/kvm/api.txt | 16
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/oprofile/common.c | 2
-rw-r--r--  arch/ia64/include/asm/cputime.h | 1
-rw-r--r--  arch/powerpc/include/asm/cputime.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 33
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 33
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 2
-rw-r--r--  arch/powerpc/kvm/e500.c | 1
-rw-r--r--  arch/s390/include/asm/cputime.h | 2
-rw-r--r--  arch/sh/oprofile/common.c | 4
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 2
-rw-r--r--  arch/x86/kvm/i8254.c | 10
-rw-r--r--  arch/x86/kvm/x86.c | 19
-rw-r--r--  block/blk-map.c | 2
-rw-r--r--  block/blk-tag.c | 13
-rw-r--r--  block/cfq-iosched.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6
-rw-r--r--  drivers/iommu/iommu.c | 2
-rw-r--r--  drivers/md/bitmap.c | 5
-rw-r--r--  drivers/md/linear.c | 1
-rw-r--r--  drivers/md/md.c | 3
-rw-r--r--  drivers/md/raid5.c | 14
-rw-r--r--  drivers/media/video/gspca/gspca.c | 4
-rw-r--r--  drivers/media/video/omap3isp/ispccdc.c | 2
-rw-r--r--  drivers/media/video/omap3isp/ispstat.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 21
-rw-r--r--  fs/btrfs/async-thread.c | 3
-rw-r--r--  fs/btrfs/inode.c | 9
-rw-r--r--  fs/fs-writeback.c | 11
-rw-r--r--  fs/locks.c | 11
-rw-r--r--  fs/proc/stat.c | 4
-rw-r--r--  fs/xfs/xfs_super.c | 30
-rw-r--r--  fs/xfs/xfs_sync.c | 36
-rw-r--r--  fs/xfs/xfs_sync.h | 2
-rw-r--r--  include/asm-generic/cputime.h | 1
-rw-r--r--  include/linux/kvm.h | 1
-rw-r--r--  include/linux/lglock.h | 36
-rw-r--r--  include/trace/events/writeback.h | 15
-rw-r--r--  mm/hugetlb.c | 2
-rw-r--r--  mm/mempolicy.c | 11
-rw-r--r--  net/ipv6/route.c | 28
-rw-r--r--  net/netfilter/Kconfig | 2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 18
-rw-r--r--  net/packet/af_packet.c | 6
-rw-r--r--  net/sched/sch_netem.c | 7
-rw-r--r--  net/sched/sch_tbf.c | 1
-rw-r--r--  scripts/kconfig/Makefile | 5
-rw-r--r--  virt/kvm/assigned-dev.c | 93
57 files changed, 398 insertions, 190 deletions
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 7945b0bd35e..e2a4b528736 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1100,6 +1100,15 @@ emulate them efficiently. The fields in each entry are defined as follows:
eax, ebx, ecx, edx: the values returned by the cpuid instruction for
this function/index combination
+The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned
+as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC
+support. Instead it is reported via
+
+ ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER)
+
+If that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
+feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
+
4.47 KVM_PPC_GET_PVINFO
Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1151,6 +1160,13 @@ following flags are specified:
/* Depends on KVM_CAP_IOMMU */
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
+The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
+isolation of the device. Usages not specifying this flag are deprecated.
+
+Only PCI header type 0 devices with PCI BAR resources are supported by
+device assignment. The user requesting this ioctl must have read/write
+access to the PCI sysfs resource files associated with the device.
+
4.49 KVM_DEASSIGN_PCI_DEVICE
Capability: KVM_CAP_DEVICE_DEASSIGNMENT
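
A minimal userspace sketch of the probing sequence the new documentation above
describes (an illustration, not part of the patch; it assumes only the
standard /dev/kvm node and the KVM_CAP_TSC_DEADLINE_TIMER constant this
series adds):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);

            if (kvm < 0) {
                    perror("open /dev/kvm");
                    return 1;
            }
            /* KVM_CHECK_EXTENSION returns > 0 when the capability is present. */
            if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER) > 0)
                    printf("TSC deadline timer: usable with KVM_CREATE_IRQCHIP\n");
            else
                    printf("TSC deadline timer: not reported by this kernel\n");
            return 0;
    }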
diff --git a/MAINTAINERS b/MAINTAINERS
index aace4176386..dbf3d94b1de 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2700,7 +2700,7 @@ FIREWIRE SUBSYSTEM
M: Stefan Richter <stefanr@s5r6.in-berlin.de>
L: linux1394-devel@lists.sourceforge.net
W: http://ieee1394.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git
S: Maintained
F: drivers/firewire/
F: include/linux/firewire*.h
diff --git a/Makefile b/Makefile
index a43733df397..ea51081812f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index c074e66ad22..4e0a371630b 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
return oprofile_perf_init(ops);
}
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
{
oprofile_perf_exit();
}
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 6073b187528..5a274af31b2 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -60,6 +60,7 @@ typedef u64 cputime64_t;
*/
#define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC)
#define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC)
+#define usecs_to_cputime64(__usecs) usecs_to_cputime(__usecs)
/*
* Convert cputime <-> seconds
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 1cf20bdfbec..98b7c4b49c9 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -150,6 +150,8 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
return ct;
}
+#define usecs_to_cputime64(us) usecs_to_cputime(us)
+
/*
* Convert cputime <-> seconds
*/
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d4df013ad77..69c7377d207 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -381,39 +381,6 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
}
#endif
-static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
- unsigned long pte_index)
-{
- unsigned long rb, va_low;
-
- rb = (v & ~0x7fUL) << 16; /* AVA field */
- va_low = pte_index >> 3;
- if (v & HPTE_V_SECONDARY)
- va_low = ~va_low;
- /* xor vsid from AVA */
- if (!(v & HPTE_V_1TB_SEG))
- va_low ^= v >> 12;
- else
- va_low ^= v >> 24;
- va_low &= 0x7ff;
- if (v & HPTE_V_LARGE) {
- rb |= 1; /* L field */
- if (cpu_has_feature(CPU_FTR_ARCH_206) &&
- (r & 0xff000)) {
- /* non-16MB large page, must be 64k */
- /* (masks depend on page size) */
- rb |= 0x1000; /* page encoding in LP field */
- rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
- rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
- }
- } else {
- /* 4kB page */
- rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */
- }
- rb |= (v >> 54) & 0x300; /* B field */
- return rb;
-}
-
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3 0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index e43fe42b987..d0ac94f98f9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -29,4 +29,37 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
#define SPAPR_TCE_SHIFT 12
+static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+ unsigned long pte_index)
+{
+ unsigned long rb, va_low;
+
+ rb = (v & ~0x7fUL) << 16; /* AVA field */
+ va_low = pte_index >> 3;
+ if (v & HPTE_V_SECONDARY)
+ va_low = ~va_low;
+ /* xor vsid from AVA */
+ if (!(v & HPTE_V_1TB_SEG))
+ va_low ^= v >> 12;
+ else
+ va_low ^= v >> 24;
+ va_low &= 0x7ff;
+ if (v & HPTE_V_LARGE) {
+ rb |= 1; /* L field */
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ (r & 0xff000)) {
+ /* non-16MB large page, must be 64k */
+ /* (masks depend on page size) */
+ rb |= 0x1000; /* page encoding in LP field */
+ rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
+ rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
+ }
+ } else {
+ /* 4kB page */
+ rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */
+ }
+ rb |= (v >> 54) & 0x300; /* B field */
+ return rb;
+}
+
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0cb137a9b03..336983da9e7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -538,7 +538,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
tpaca->kvm_hstate.napping = 0;
vcpu->cpu = vc->pcpu;
smp_wmb();
-#ifdef CONFIG_PPC_ICP_NATIVE
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
if (vcpu->arch.ptid) {
tpaca->cpu_start = 0x80;
wmb();
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 3c791e1eb67..e2cfb9e1e20 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -658,10 +658,12 @@ program_interrupt:
ulong cmd = kvmppc_get_gpr(vcpu, 3);
int i;
+#ifdef CONFIG_KVM_BOOK3S_64_PR
if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
r = RESUME_GUEST;
break;
}
+#endif
run->papr_hcall.nr = cmd;
for (i = 0; i < 9; ++i) {
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 26d20903f2b..8c0d45a6faf 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -15,6 +15,7 @@
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 08143487829..b9acaaa175d 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -87,6 +87,8 @@ usecs_to_cputime(const unsigned int m)
return (cputime_t) m * 4096;
}
+#define usecs_to_cputime64(m) usecs_to_cputime(m)
+
/*
* Convert cputime to milliseconds and back.
*/
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
index b4c2d2b946d..e4dd5d5a111 100644
--- a/arch/sh/oprofile/common.c
+++ b/arch/sh/oprofile/common.c
@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
return oprofile_perf_init(ops);
}
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
{
oprofile_perf_exit();
kfree(sh_pmu_op_name);
@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->backtrace = sh_backtrace;
return -ENODEV;
}
-void __exit oprofile_arch_exit(void) {}
+void oprofile_arch_exit(void) {}
#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index b272cda35a0..af5755d20fb 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
if (!irq)
return -ENOMEM;
- if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
- return -EINVAL;
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
+ if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+ return -EINVAL;
return irq;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 8d601b18bf9..121f1be4da1 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1169,7 +1169,7 @@ again:
*/
c = &unconstrained;
} else if (intel_try_alt_er(event, orig_idx)) {
- raw_spin_unlock(&era->lock);
+ raw_spin_unlock_irqrestore(&era->lock, flags);
goto again;
}
raw_spin_unlock_irqrestore(&era->lock, flags);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 76e3f1cd036..405f2620392 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
return HRTIMER_NORESTART;
}
-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
+static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
{
+ struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
struct kvm_timer *pt = &ps->pit_timer;
s64 interval;
+ if (!irqchip_in_kernel(kvm))
+ return;
+
interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
pr_debug("create pit timer, interval is %llu nsec\n", interval);
@@ -394,13 +398,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
/* FIXME: enhance mode 4 precision */
case 4:
if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
- create_pit_timer(ps, val, 0);
+ create_pit_timer(kvm, val, 0);
}
break;
case 2:
case 3:
if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
- create_pit_timer(ps, val, 1);
+ create_pit_timer(kvm, val, 1);
}
break;
default:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c38efd7b792..4c938da2ba0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -602,7 +602,6 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
struct kvm_lapic *apic = vcpu->arch.apic;
- u32 timer_mode_mask;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
if (!best)
@@ -615,15 +614,12 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
best->ecx |= bit(X86_FEATURE_OSXSAVE);
}
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- best->function == 0x1) {
- best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER);
- timer_mode_mask = 3 << 17;
- } else
- timer_mode_mask = 1 << 17;
-
- if (apic)
- apic->lapic_timer.timer_mode_mask = timer_mode_mask;
+ if (apic) {
+ if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+ apic->lapic_timer.timer_mode_mask = 3 << 17;
+ else
+ apic->lapic_timer.timer_mode_mask = 1 << 17;
+ }
}
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -2135,6 +2131,9 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
+ case KVM_CAP_TSC_DEADLINE_TIMER:
+ r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
+ break;
default:
r = 0;
break;
diff --git a/block/blk-map.c b/block/blk-map.c
index 164cd005970..623e1cd4cff 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -311,7 +311,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (rq_data_dir(rq) == WRITE)
+ if (!reading)
bio->bi_rw |= REQ_WRITE;
if (do_copy)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index e74d6d13838..4af6f5cc116 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -282,18 +282,9 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
- int tag = rq->tag;
+ unsigned tag = rq->tag; /* negative tags invalid */
- BUG_ON(tag == -1);
-
- if (unlikely(tag >= bqt->max_depth)) {
- /*
- * This can happen after tag depth has been reduced.
- * But tag shouldn't be larger than real_max_depth.
- */
- WARN_ON(tag >= bqt->real_max_depth);
- return;
- }
+ BUG_ON(tag >= bqt->real_max_depth);
list_del_init(&rq->queuelist);
rq->cmd_flags &= ~REQ_QUEUED;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4c12869fcf7..3548705b04e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1655,6 +1655,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
/*
* reposition in fifo if next is older than rq
*/
@@ -1669,6 +1671,16 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
cfq_remove_request(next);
cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(next), rq_is_sync(next));
+
+ cfqq = RQ_CFQQ(next);
+ /*
+ * all requests of this queue are merged to other queues, delete it
+ * from the service tree. If it's the active_queue,
+ * cfq_dispatch_requests() will choose to expire it or do idle
+ */
+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
+ cfqq != cfqd->active_queue)
+ cfq_del_cfqq_rr(cfqd, cfqq);
}
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c681dc149d2..b9da8900ae4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -756,9 +756,9 @@ intel_enable_semaphores(struct drm_device *dev)
if (i915_semaphores >= 0)
return i915_semaphores;
- /* Enable semaphores on SNB when IO remapping is off */
+ /* Disable semaphores on SNB */
if (INTEL_INFO(dev)->gen == 6)
- return !intel_iommu_enabled;
+ return 0;
return 1;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d809b038ca8..daa5743ccbd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7922,13 +7922,11 @@ static bool intel_enable_rc6(struct drm_device *dev)
return 0;
/*
- * Enable rc6 on Sandybridge if DMA remapping is disabled
+ * Disable rc6 on Sandybridge
*/
if (INTEL_INFO(dev)->gen == 6) {
- DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
- intel_iommu_enabled ? "true" : "false",
- !intel_iommu_enabled ? "en" : "dis");
- return !intel_iommu_enabled;
+ DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+ return 0;
}
DRM_DEBUG_DRIVER("RC6 enabled\n");
return 1;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 5e00d1670aa..92c9628c572 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3276,6 +3276,18 @@ int evergreen_init(struct radeon_device *rdev)
rdev->accel_working = false;
}
}
+
+ /* Don't start up if the MC ucode is missing on BTC parts.
+ * The default clocks and voltages before the MC ucode
+ * is loaded are not sufficient for advanced operations.
+ */
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+ DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8aa1dbb45c6..f94b33ae221 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1093,7 +1093,6 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
- u64 required_size;
int ret;
/**
@@ -1102,8 +1101,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* requested framebuffer.
*/
- required_size = mode_cmd->pitch * mode_cmd->height;
- if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+ if (!vmw_kms_validate_mode_vram(dev_priv,
+ mode_cmd->pitch,
+ mode_cmd->height)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2fb2963df55..5b5fa5cdaa3 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -90,7 +90,7 @@ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
- domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index b6907118283..6d03774b176 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1393,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
atomic_read(&bitmap->behind_writes),
bitmap->mddev->bitmap_info.max_write_behind);
}
- if (bitmap->mddev->degraded)
- /* Never clear bits or update events_cleared when degraded */
- success = 0;
while (sectors) {
sector_t blocks;
@@ -1409,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
return;
}
- if (success &&
+ if (success && !bitmap->mddev->degraded &&
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index c3273efd08c..627456542fb 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
return -EINVAL;
rdev->raid_disk = rdev->saved_raid_disk;
+ rdev->saved_raid_disk = -1;
newconf = linear_conf(mddev,mddev->raid_disks+1);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ee981737edf..f47f1f8ac44 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7360,8 +7360,7 @@ static int remove_and_add_spares(struct mddev *mddev)
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- } else
- break;
+ }
}
}
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31670f8d6b6..858fdbb7eb0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3065,11 +3065,17 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else {
+ else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
/* in sync if before recovery_offset */
- if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
- set_bit(R5_Insync, &dev->flags);
- }
+ set_bit(R5_Insync, &dev->flags);
+ else if (test_bit(R5_UPTODATE, &dev->flags) &&
+ test_bit(R5_Expanded, &dev->flags))
+ /* If we've reshaped into here, we assume it is Insync.
+ * We will shortly update recovery_offset to make
+ * it official.
+ */
+ set_bit(R5_Insync, &dev->flags);
+
if (rdev && test_bit(R5_WriteError, &dev->flags)) {
clear_bit(R5_Insync, &dev->flags);
if (!test_bit(Faulty, &rdev->flags)) {
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 881e04c7ffe..512f32ff446 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -838,13 +838,13 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
gspca_dev->usb_err = 0;
/* do the specific subdriver stuff before endpoint selection */
- gspca_dev->alt = 0;
+ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+ gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0;
if (gspca_dev->sd_desc->isoc_init) {
ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
if (ret < 0)
goto unlock;
}
- intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
: USB_ENDPOINT_XFER_ISOC;
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index b0b0fa5a357..54a4a3f22e2 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
{
struct isp_pipeline *pipe =
to_isp_pipeline(&ccdc->video_out.video.entity);
- struct video_device *vdev = &ccdc->subdev.devnode;
+ struct video_device *vdev = ccdc->subdev.devnode;
struct v4l2_event event;
memset(&event, 0, sizeof(event));
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 68d539456c5..bc0b2c7349b 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
static void isp_stat_queue_event(struct ispstat *stat, int err)
{
- struct video_device *vdev = &stat->subdev.devnode;
+ struct video_device *vdev = stat->subdev.devnode;
struct v4l2_event event;
struct omap3isp_stat_event_status *status = (void *)event.u.data;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e0639ebebe5..8bcc66f2c2a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -657,6 +657,8 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
u8 port_type;
int err;
+#define MLX4_VF_PORT_ETH_ONLY_MASK 0xE6
+
err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
@@ -671,8 +673,8 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
MLX4_GET(port_type, outbox->buf,
QUERY_PORT_SUPPORTED_TYPE_OFFSET);
- /* disable ib */
- port_type &= 0xFE;
+ /* Allow only Eth port, no link sensing allowed */
+ port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;
/* check eth is enabled for this port */
if (!(port_type & 2))
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 1209934844c..6bb62c580e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -332,7 +332,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
* and perform sense_port FW command to try and set the correct
* port type from beginning
*/
- if (mlx4_priv(dev)->sense.sense_allowed && dev->caps.default_sense[i]) {
+ if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
mlx4_SENSE_PORT(dev, i, &sensed_port);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d1c3dce15dc..2055386eda5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -440,7 +440,13 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
return err;
}
-/* Returns false if we couldn't fill entirely (OOM). */
+/*
+ * Returns false if we couldn't fill entirely (OOM).
+ *
+ * Normally run in the receive path, but can also be run from ndo_open
+ * before we're receiving packets, or from refill_work which is
+ * careful to disable receiving (using napi_disable).
+ */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
int err;
@@ -502,7 +508,7 @@ static void refill_work(struct work_struct *work)
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
if (still_empty)
- schedule_delayed_work(&vi->refill, HZ/2);
+ queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -521,7 +527,7 @@ again:
if (vi->num < vi->max / 2) {
if (!try_fill_recv(vi, GFP_ATOMIC))
- schedule_delayed_work(&vi->refill, 0);
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
}
/* Out of packets? */
@@ -721,6 +727,10 @@ static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ /* Make sure we have some buffers: if oom use wq. */
+ if (!try_fill_recv(vi, GFP_KERNEL))
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
virtnet_napi_enable(vi);
return 0;
}
@@ -774,6 +784,8 @@ static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ /* Make sure refill_work doesn't re-enable napi! */
+ cancel_delayed_work_sync(&vi->refill);
napi_disable(&vi->napi);
return 0;
@@ -1100,7 +1112,6 @@ static int virtnet_probe(struct virtio_device *vdev)
unregister:
unregister_netdev(dev);
- cancel_delayed_work_sync(&vi->refill);
free_vqs:
vdev->config->del_vqs(vdev);
free_stats:
@@ -1139,9 +1150,7 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
-
unregister_netdev(vi->dev);
- cancel_delayed_work_sync(&vi->refill);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index cb97174e236..0b394580d86 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -563,8 +563,8 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
struct list_head *fallback;
int ret;
-again:
spin_lock_irqsave(&workers->lock, flags);
+again:
worker = next_worker(workers);
if (!worker) {
@@ -579,6 +579,7 @@ again:
spin_unlock_irqrestore(&workers->lock, flags);
/* we're below the limit, start another worker */
ret = __btrfs_start_workers(workers);
+ spin_lock_irqsave(&workers->lock, flags);
if (ret)
goto fallback;
goto again;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 0a6b928813a..fd1a06df5bc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4590,10 +4590,6 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
int err = btrfs_add_link(trans, dir, inode,
dentry->d_name.name, dentry->d_name.len,
backref, index);
- if (!err) {
- d_instantiate(dentry, inode);
- return 0;
- }
if (err > 0)
err = -EEXIST;
return err;
@@ -4655,6 +4651,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
else {
init_special_inode(inode, inode->i_mode, rdev);
btrfs_update_inode(trans, root, inode);
+ d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
@@ -4722,6 +4719,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+ d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
@@ -4779,6 +4777,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *parent = dentry->d_parent;
err = btrfs_update_inode(trans, root, inode);
BUG_ON(err);
+ d_instantiate(dentry, inode);
btrfs_log_new_name(trans, inode, NULL, parent);
}
@@ -7245,6 +7244,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
drop_inode = 1;
out_unlock:
+ if (!err)
+ d_instantiate(dentry, inode);
nr = trans->blocks_used;
btrfs_end_transaction_throttle(trans, root);
if (drop_inode) {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ac86f8b3e3c..517f211a3bd 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -47,17 +47,6 @@ struct wb_writeback_work {
struct completion *done; /* set if the caller waits */
};
-const char *wb_reason_name[] = {
- [WB_REASON_BACKGROUND] = "background",
- [WB_REASON_TRY_TO_FREE_PAGES] = "try_to_free_pages",
- [WB_REASON_SYNC] = "sync",
- [WB_REASON_PERIODIC] = "periodic",
- [WB_REASON_LAPTOP_TIMER] = "laptop_timer",
- [WB_REASON_FREE_MORE_MEM] = "free_more_memory",
- [WB_REASON_FS_FREE_SPACE] = "fs_free_space",
- [WB_REASON_FORKER_THREAD] = "forker_thread"
-};
-
/*
* Include the creation of the trace points after defining the
* wb_writeback_work structure so that the definition remains local to this
diff --git a/fs/locks.c b/fs/locks.c
index 3b0d05dcd7c..637694bf3a0 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1205,6 +1205,8 @@ int __break_lease(struct inode *inode, unsigned int mode)
int want_write = (mode & O_ACCMODE) != O_RDONLY;
new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
+ if (IS_ERR(new_fl))
+ return PTR_ERR(new_fl);
lock_flocks();
@@ -1221,12 +1223,6 @@ int __break_lease(struct inode *inode, unsigned int mode)
if (fl->fl_owner == current->files)
i_have_this_lease = 1;
- if (IS_ERR(new_fl) && !i_have_this_lease
- && ((mode & O_NONBLOCK) == 0)) {
- error = PTR_ERR(new_fl);
- goto out;
- }
-
break_time = 0;
if (lease_break_time > 0) {
break_time = jiffies + lease_break_time * HZ;
@@ -1284,8 +1280,7 @@ restart:
out:
unlock_flocks();
- if (!IS_ERR(new_fl))
- locks_free_lock(new_fl);
+ locks_free_lock(new_fl);
return error;
}
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 2a30d67dd6b..0855e6f2039 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu)
idle = kstat_cpu(cpu).cpustat.idle;
idle = cputime64_add(idle, arch_idle_time(cpu));
} else
- idle = nsecs_to_jiffies64(1000 * idle_time);
+ idle = usecs_to_cputime64(idle_time);
return idle;
}
@@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu)
/* !NO_HZ so we can rely on cpustat.iowait */
iowait = kstat_cpu(cpu).cpustat.iowait;
else
- iowait = nsecs_to_jiffies64(1000 * iowait_time);
+ iowait = usecs_to_cputime64(iowait_time);
return iowait;
}
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 3eca58f51ae..8a899496fd5 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -869,27 +869,6 @@ xfs_fs_dirty_inode(
}
STATIC int
-xfs_log_inode(
- struct xfs_inode *ip)
-{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_trans *tp;
- int error;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
- error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- return xfs_trans_commit(tp, 0);
-}
-
-STATIC int
xfs_fs_write_inode(
struct inode *inode,
struct writeback_control *wbc)
@@ -902,10 +881,8 @@ xfs_fs_write_inode(
if (XFS_FORCED_SHUTDOWN(mp))
return -XFS_ERROR(EIO);
- if (!ip->i_update_core)
- return 0;
- if (wbc->sync_mode == WB_SYNC_ALL) {
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
/*
* Make sure the inode has made it into the log. Instead
* of forcing it all the way to stable storage using a
* synchronous transaction we let the log force inside the
* ->sync_fs call do that for us, which reduces the number
* ->sync_fs call do that for thus, which reduces the number
* of synchronous log forces dramatically.
*/
- error = xfs_log_inode(ip);
+ error = xfs_log_dirty_inode(ip, NULL, 0);
if (error)
goto out;
return 0;
} else {
+ if (!ip->i_update_core)
+ return 0;
+
/*
* We make this non-blocking if the inode is contended, return
* EAGAIN to indicate to the caller that they did not succeed.
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index be5c51d8f75..f0994aedcd1 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -336,6 +336,32 @@ xfs_sync_fsdata(
return error;
}
+int
+xfs_log_dirty_inode(
+ struct xfs_inode *ip,
+ struct xfs_perag *pag,
+ int flags)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ if (!ip->i_update_core)
+ return 0;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+ error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ return xfs_trans_commit(tp, 0);
+}
+
/*
* When remounting a filesystem read-only or freezing the filesystem, we have
* two phases to execute. This first phase is syncing the data before we
@@ -359,6 +385,16 @@ xfs_quiesce_data(
{
int error, error2 = 0;
+ /*
+ * Log all pending size and timestamp updates. The vfs writeback
+ * code is supposed to do this, but due to its overaggressive
+ * livelock detection it will skip inodes where appending writes
+ * were written out in the first non-blocking sync phase if their
+ * completion took long enough that it happened after taking the
+ * timestamp for the cut-off in the blocking phase.
+ */
+ xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);
+
xfs_qm_sync(mp, SYNC_TRYLOCK);
xfs_qm_sync(mp, SYNC_WAIT);
diff --git a/fs/xfs/xfs_sync.h b/fs/xfs/xfs_sync.h
index 941202e7ac6..fa965479d78 100644
--- a/fs/xfs/xfs_sync.h
+++ b/fs/xfs/xfs_sync.h
@@ -34,6 +34,8 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
void xfs_flush_inodes(struct xfs_inode *ip);
+int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags);
+
int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
int xfs_reclaim_inodes_count(struct xfs_mount *mp);
void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 62ce6823c0f..12a1764f612 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -40,6 +40,7 @@ typedef u64 cputime64_t;
*/
#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct)
#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs)
+#define usecs_to_cputime64(__msecs) nsecs_to_jiffies64((__msecs) * 1000)
/*
* Convert cputime to seconds and back.
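
To make the unit difference concrete, a worked example for the generic
definition above (assuming HZ=1000; not from the patch):

    /*
     * Generic (jiffies-backed) cputime64_t:
     *   usecs_to_cputime64(2500) == nsecs_to_jiffies64(2500 * 1000)
     *                            == nsecs_to_jiffies64(2500000) -> 2 jiffies,
     * i.e. 2.5 ms truncated to jiffy granularity. The ia64 (nanoseconds),
     * powerpc (timebase ticks) and s390 (TOD units) definitions added
     * earlier in this series convert within their native units instead.
     */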
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index c3892fc1d53..68e67e50d02 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -557,6 +557,7 @@ struct kvm_ppc_pvinfo {
#define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */
#define KVM_CAP_PPC_PAPR 68
#define KVM_CAP_S390_GMAP 71
+#define KVM_CAP_TSC_DEADLINE_TIMER 72
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index f549056fb20..87f402ccec5 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/percpu.h>
+#include <linux/cpu.h>
/* can make br locks by using local lock for read side, global lock for write */
#define br_lock_init(name) name##_lock_init()
@@ -72,9 +73,31 @@
#define DEFINE_LGLOCK(name) \
\
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+ cpumask_t name##_cpus __read_mostly; \
DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
DEFINE_LGLOCK_LOCKDEP(name); \
\
+ static int \
+ name##_lg_cpu_callback(struct notifier_block *nb, \
+ unsigned long action, void *hcpu) \
+ { \
+ switch (action & ~CPU_TASKS_FROZEN) { \
+ case CPU_UP_PREPARE: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_set((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ break; \
+ case CPU_UP_CANCELED: case CPU_DEAD: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_clear((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ } \
+ return NOTIFY_OK; \
+ } \
+ static struct notifier_block name##_lg_cpu_notifier = { \
+ .notifier_call = name##_lg_cpu_callback, \
+ }; \
void name##_lock_init(void) { \
int i; \
LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
@@ -83,6 +106,11 @@
lock = &per_cpu(name##_lock, i); \
*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
} \
+ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
+ get_online_cpus(); \
+ for_each_online_cpu(i) \
+ cpu_set(i, name##_cpus); \
+ put_online_cpus(); \
} \
EXPORT_SYMBOL(name##_lock_init); \
\
@@ -124,9 +152,9 @@
\
void name##_global_lock_online(void) { \
int i; \
- preempt_disable(); \
+ spin_lock(&name##_cpu_lock); \
rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_cpu(i, &name##_cpus) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_lock(lock); \
@@ -137,12 +165,12 @@
void name##_global_unlock_online(void) { \
int i; \
rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_cpu(i, &name##_cpus) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_unlock(lock); \
} \
- preempt_enable(); \
+ spin_unlock(&name##_cpu_lock); \
} \
EXPORT_SYMBOL(name##_global_unlock_online); \
\
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index b99caa8b780..99d1d0decf8 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -21,6 +21,16 @@
{I_REFERENCED, "I_REFERENCED"} \
)
+#define WB_WORK_REASON \
+ {WB_REASON_BACKGROUND, "background"}, \
+ {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \
+ {WB_REASON_SYNC, "sync"}, \
+ {WB_REASON_PERIODIC, "periodic"}, \
+ {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \
+ {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \
+ {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \
+ {WB_REASON_FORKER_THREAD, "forker_thread"}
+
struct wb_writeback_work;
DECLARE_EVENT_CLASS(writeback_work_class,
@@ -55,7 +65,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->for_kupdate,
__entry->range_cyclic,
__entry->for_background,
- wb_reason_name[__entry->reason]
+ __print_symbolic(__entry->reason, WB_WORK_REASON)
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -184,7 +194,8 @@ TRACE_EVENT(writeback_queue_io,
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
__entry->moved,
- wb_reason_name[__entry->reason])
+ __print_symbolic(__entry->reason, WB_WORK_REASON)
+ )
);
TRACE_EVENT(global_dirty_state,
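
The motivation for the conversion: the removed wb_reason_name array was an
ordinary kernel symbol that userspace trace parsers could not see, whereas
__print_symbolic() records its value-to-name map in the event's format
description. The general shape of the construct, shown with a hypothetical
map:

    #define DEMO_STATES        \
            { 0, "idle" },     \
            { 1, "busy" }

    /* inside a TRACE_EVENT() definition: */
    TP_printk("state=%s", __print_symbolic(__entry->state, DEMO_STATES))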
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 73f17c0293c..2316840b337 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -901,7 +901,6 @@ retry:
h->resv_huge_pages += delta;
ret = 0;
- spin_unlock(&hugetlb_lock);
/* Free the needed pages to the hugetlb pool */
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
if ((--needed) < 0)
@@ -915,6 +914,7 @@ retry:
VM_BUG_ON(page_count(page));
enqueue_huge_page(h, page);
}
+ spin_unlock(&hugetlb_lock);
/* Free unnecessary surplus pages to the buddy allocator */
free:
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index adc39548181..c3fdbcb1765 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -636,6 +636,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
struct vm_area_struct *prev;
struct vm_area_struct *vma;
int err = 0;
+ pgoff_t pgoff;
unsigned long vmstart;
unsigned long vmend;
@@ -643,13 +644,21 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (!vma || vma->vm_start > start)
return -EFAULT;
+ if (start > vma->vm_start)
+ prev = vma;
+
for (; vma && vma->vm_start < end; prev = vma, vma = next) {
next = vma->vm_next;
vmstart = max(start, vma->vm_start);
vmend = min(end, vma->vm_end);
+ if (mpol_equal(vma_policy(vma), new_pol))
+ continue;
+
+ pgoff = vma->vm_pgoff +
+ ((vmstart - vma->vm_start) >> PAGE_SHIFT);
prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
- vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+ vma->anon_vma, vma->vm_file, pgoff,
new_pol);
if (prev) {
vma = prev;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0940729d2f9..4a62c47599b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -129,11 +129,14 @@ static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const voi
return neigh_create(&nd_tbl, daddr, dst->dev);
}
-static int rt6_bind_neighbour(struct rt6_info *rt)
+static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev)
{
- struct neighbour *n = ip6_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
- if (IS_ERR(n))
- return PTR_ERR(n);
+ struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dev, &rt->rt6i_gateway);
+ if (!n) {
+ n = neigh_create(&nd_tbl, &rt->rt6i_gateway, dev);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ }
dst_set_neighbour(&rt->dst, n);
return 0;
@@ -746,7 +749,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
#endif
retry:
- if (rt6_bind_neighbour(rt)) {
+ if (rt6_bind_neighbour(rt, rt->dst.dev)) {
struct net *net = dev_net(rt->dst.dev);
int saved_rt_min_interval =
net->ipv6.sysctl.ip6_rt_gc_min_interval;
@@ -1397,7 +1400,7 @@ int ip6_route_add(struct fib6_config *cfg)
rt->rt6i_prefsrc.plen = 0;
if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
- err = rt6_bind_neighbour(rt);
+ err = rt6_bind_neighbour(rt, dev);
if (err)
goto out;
}
@@ -2084,7 +2087,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
rt->rt6i_flags |= RTF_ANYCAST;
else
rt->rt6i_flags |= RTF_LOCAL;
- err = rt6_bind_neighbour(rt);
+ err = rt6_bind_neighbour(rt, rt->dst.dev);
if (err) {
dst_free(&rt->dst);
return ERR_PTR(err);
@@ -2360,11 +2363,13 @@ static int rt6_fill_node(struct net *net,
int iif, int type, u32 pid, u32 seq,
int prefix, int nowait, unsigned int flags)
{
+ const struct inet_peer *peer;
struct rtmsg *rtm;
struct nlmsghdr *nlh;
long expires;
u32 table;
struct neighbour *n;
+ u32 ts, tsage;
if (prefix) { /* user wants prefix routes only */
if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2471,7 +2476,14 @@ static int rt6_fill_node(struct net *net,
else
expires = INT_MAX;
- if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
+ peer = rt->rt6i_peer;
+ ts = tsage = 0;
+ if (peer && peer->tcp_ts_stamp) {
+ ts = peer->tcp_ts;
+ tsage = get_seconds() - peer->tcp_ts_stamp;
+ }
+
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage,
expires, rt->dst.error) < 0)
goto nla_put_failure;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index f6275a0f1ea..f8ac4ef0b79 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -908,7 +908,7 @@ config NETFILTER_XT_MATCH_MULTIPORT
config NETFILTER_XT_MATCH_NFACCT
tristate '"nfacct" match support'
- default m if NETFILTER_ADVANCED=n
+ depends on NETFILTER_ADVANCED
select NETFILTER_NETLINK_ACCT
help
This option allows you to use the extended accounting through
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 85033344aed..bb10c077a01 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1367,12 +1367,15 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
nf_ct_protonum(ct));
if (helper == NULL) {
rcu_read_unlock();
+ spin_unlock_bh(&nf_conntrack_lock);
#ifdef CONFIG_MODULES
if (request_module("nfct-helper-%s", helpname) < 0) {
+ spin_lock_bh(&nf_conntrack_lock);
err = -EOPNOTSUPP;
goto err1;
}
+ spin_lock_bh(&nf_conntrack_lock);
rcu_read_lock();
helper = __nf_conntrack_helper_find(helpname,
nf_ct_l3num(ct),
@@ -1880,25 +1883,30 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
err = -ENOMEM;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (skb2 == NULL)
+ if (skb2 == NULL) {
+ nf_ct_expect_put(exp);
goto out;
+ }
rcu_read_lock();
err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
rcu_read_unlock();
+ nf_ct_expect_put(exp);
if (err <= 0)
goto free;
- nf_ct_expect_put(exp);
+ err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (err < 0)
+ goto out;
- return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ return 0;
free:
kfree_skb(skb2);
out:
- nf_ct_expect_put(exp);
- return err;
+ /* this avoids a loop in nfnetlink. */
+ return err == -EAGAIN ? -ENOBUFS : err;
}
static int
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e56ca75e3f4..2dbb32b988c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2453,8 +2453,12 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
{
struct packet_sock *po = pkt_sk(sk);
- if (po->fanout)
+ if (po->fanout) {
+ if (dev)
+ dev_put(dev);
+
return -EINVAL;
+ }
lock_sock(sk);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index ffcaa597594..a92c1b3dab8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -532,7 +532,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
return -EINVAL;
s = sizeof(struct disttable) + n * sizeof(s16);
- d = kmalloc(s, GFP_KERNEL);
+ d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
if (!d)
d = vmalloc(s);
if (!d)
@@ -545,9 +545,10 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
root_lock = qdisc_root_sleeping_lock(sch);
spin_lock_bh(root_lock);
- dist_free(q->delay_dist);
- q->delay_dist = d;
+ swap(q->delay_dist, d);
spin_unlock_bh(root_lock);
+
+ dist_free(d);
return 0;
}
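
Two details of the hunk above are worth spelling out: the table is allocated
with kmalloc(GFP_KERNEL | __GFP_NOWARN) and falls back to vmalloc() for large
sizes, and the old table is swap()ed out under the qdisc root lock but freed
only after the lock is dropped, since dist_free() may end up in vfree(), which
must not run under a spinlock. A self-contained sketch of that allocate/free
pairing, with hypothetical helper names:

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *big_table_alloc(size_t size)
    {
            /* A large kmalloc may fail; __GFP_NOWARN keeps the log quiet. */
            void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

            if (!p)
                    p = vmalloc(size);
            return p;
    }

    static void big_table_free(void *p)
    {
            if (is_vmalloc_addr(p))
                    vfree(p);
            else
                    kfree(p);
    }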
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 1dcfb5223a8..b8e156319d7 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -346,6 +346,7 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
struct nlattr *nest;
struct tc_tbf_qopt opt;
+ sch->qstats.backlog = q->qdisc->qstats.backlog;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index ba573fe7c74..914833d99b0 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
--directory=$(srctree) --directory=$(objtree) \
--output $(obj)/config.pot
$(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
- $(Q)ln -fs Kconfig.x86 arch/um/Kconfig
- $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`; \
+ $(Q)(for i in `ls $(srctree)/arch/*/Kconfig \
+ $(srctree)/arch/*/um/Kconfig`; \
do \
echo " GEN $$i"; \
$(obj)/kxgettext $$i \
@@ -69,7 +69,6 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
done )
$(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
--output $(obj)/linux.pot
- $(Q)rm -f $(srctree)/arch/um/Kconfig
$(Q)rm -f $(obj)/config.pot
PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 3ad0925d23a..758e3b36d4c 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -17,6 +17,8 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
#include "irq.h"
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
@@ -480,12 +482,76 @@ out:
return r;
}
+/*
+ * We want to test whether the caller has been granted permissions to
+ * use this device. To be able to configure and control the device,
+ * the user needs access to PCI configuration space and BAR resources.
+ * These are accessed through PCI sysfs. PCI config space is often
+ * passed to the process calling this ioctl via file descriptor, so we
+ * can't rely on access to that file. We can check for permissions
+ * on each of the BAR resource files, which is a pretty clear
+ * indicator that the user has been granted access to the device.
+ */
+static int probe_sysfs_permissions(struct pci_dev *dev)
+{
+#ifdef CONFIG_SYSFS
+ int i;
+ bool bar_found = false;
+
+ for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
+ char *kpath, *syspath;
+ struct path path;
+ struct inode *inode;
+ int r;
+
+ if (!pci_resource_len(dev, i))
+ continue;
+
+ kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
+ if (!kpath)
+ return -ENOMEM;
+
+ /* Per sysfs-rules, sysfs is always at /sys */
+ syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
+ kfree(kpath);
+ if (!syspath)
+ return -ENOMEM;
+
+ r = kern_path(syspath, LOOKUP_FOLLOW, &path);
+ kfree(syspath);
+ if (r)
+ return r;
+
+ inode = path.dentry->d_inode;
+
+ r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
+ path_put(&path);
+ if (r)
+ return r;
+
+ bar_found = true;
+ }
+
+ /* If no resources, probably something special */
+ if (!bar_found)
+ return -EPERM;
+
+ return 0;
+#else
+ return -EINVAL; /* No way to control the device without sysfs */
+#endif
+}
+
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
struct kvm_assigned_pci_dev *assigned_dev)
{
int r = 0, idx;
struct kvm_assigned_dev_kernel *match;
struct pci_dev *dev;
+ u8 header_type;
+
+ if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
+ return -EINVAL;
mutex_lock(&kvm->lock);
idx = srcu_read_lock(&kvm->srcu);
@@ -513,6 +579,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
r = -EINVAL;
goto out_free;
}
+
+ /* Don't allow bridges to be assigned */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+ if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+ r = -EPERM;
+ goto out_put;
+ }
+
+ r = probe_sysfs_permissions(dev);
+ if (r)
+ goto out_put;
+
if (pci_enable_device(dev)) {
printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
r = -EBUSY;
@@ -544,16 +622,14 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
list_add(&match->list, &kvm->arch.assigned_dev_head);
- if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
- if (!kvm->arch.iommu_domain) {
- r = kvm_iommu_map_guest(kvm);
- if (r)
- goto out_list_del;
- }
- r = kvm_assign_device(kvm, match);
+ if (!kvm->arch.iommu_domain) {
+ r = kvm_iommu_map_guest(kvm);
if (r)
goto out_list_del;
}
+ r = kvm_assign_device(kvm, match);
+ if (r)
+ goto out_list_del;
out:
srcu_read_unlock(&kvm->srcu, idx);
@@ -593,8 +669,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
goto out;
}
- if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
- kvm_deassign_device(kvm, match);
+ kvm_deassign_device(kvm, match);
kvm_free_assigned_device(kvm, match);
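
Seen from userspace, the effect of this series is that the IOMMU flag is no
longer optional. A hedged sketch of the assignment call (vm_fd, the
0000:01:00.0 address and the dev_id handle are illustrative assumptions, not
values from the patch):

    struct kvm_assigned_pci_dev adev = {
            .assigned_dev_id = 1,                   /* caller-chosen handle */
            .busnr           = 0x01,                /* bus of 0000:01:00.0 */
            .devfn           = (0x00 << 3) | 0x0,   /* slot 0, function 0 */
            .flags           = KVM_DEV_ASSIGN_ENABLE_IOMMU, /* now required */
    };

    /*
     * Expect EINVAL if the IOMMU flag is missing, and EPERM for a
     * non-type-0 header or when the caller lacks read/write access to the
     * device's sysfs resource files (see probe_sysfs_permissions() above).
     */
    if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &adev) < 0)
            perror("KVM_ASSIGN_PCI_DEVICE");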