Discussion:
[PATCH 07/40] KVM: VMX: Make guest cr4 mask more conservative
Avi Kivity
2010-02-10 17:22:36 UTC
Instead of specifying the bits which we want to trap on, specify the bits
which we allow the guest to change transparently. This is safer wrt future
changes to cr4.

Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/kvm/vmx.c | 10 ++++++----
1 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 284e905..755811a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -69,8 +69,10 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
(X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
#define KVM_VM_CR0_ALWAYS_ON \
(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
-#define KVM_GUEST_CR4_MASK \
- (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
+#define KVM_CR4_GUEST_OWNED_BITS \
+ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
+ | X86_CR4_OSXMMEXCPT)
+
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

@@ -2421,8 +2423,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);

vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
- vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
- vmx->vcpu.arch.cr4_guest_owned_bits = ~KVM_GUEST_CR4_MASK;
+ vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
+ vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);

tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
rdtscll(tsc_this);
--
1.6.5.3
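
The mask written to CR4_GUEST_HOST_MASK above is the complement of the guest-owned bits: a set bit is host-owned (guest writes to it cause a VM exit and guest reads return the CR4 read shadow), while a clear bit is guest-owned and lives directly in the hardware register. Below is a minimal sketch of how the guest-visible CR4 value can be reconstructed under that scheme; guest_visible_cr4() is a hypothetical helper for illustration, not the function KVM actually uses.

/*
 * Sketch only: reconstruct the CR4 value the guest observes from the
 * hardware CR4, the CR4 read shadow, and the set of guest-owned bits.
 */
static unsigned long guest_visible_cr4(unsigned long hw_cr4,
				       unsigned long read_shadow,
				       unsigned long guest_owned_bits)
{
	/* guest-owned bits come from hardware, host-owned bits from the shadow */
	return (hw_cr4 & guest_owned_bits) | (read_shadow & ~guest_owned_bits);
}

Listing the guest-owned bits explicitly means any CR4 bit added in the future defaults to being trapped, which is the conservative behaviour the patch is after.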
Avi Kivity
2010-02-10 17:22:31 UTC
From: Marcelo Tosatti <***@redhat.com>

Windows 2003 uses a task switch to triple fault and reboot (the other path it
relies on being reserved PDPTR bits). Raise a #TS exception when a NULL
selector is loaded into CS or SS during segment loading, instead of silently
accepting it.

Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/x86/kvm/x86.c | 11 +++++++++++
1 files changed, 11 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b503614..47fab87 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4411,6 +4411,15 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
(kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
}

+static void kvm_check_segment_descriptor(struct kvm_vcpu *vcpu, int seg,
+ u16 selector)
+{
+ /* NULL selector is not valid for CS and SS */
+ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
+ if (!selector)
+ kvm_queue_exception_e(vcpu, TS_VECTOR, selector >> 3);
+}
+
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
int type_bits, int seg)
{
@@ -4420,6 +4429,8 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
return kvm_load_realmode_segment(vcpu, selector, seg);
if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
return 1;
+
+ kvm_check_segment_descriptor(vcpu, seg, selector);
kvm_seg.type |= type_bits;

if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
--
1.6.5.3
Avi Kivity
2010-02-10 17:22:41 UTC
From: Sheng Yang <***@linux.intel.com>

KVM needs vsyscall_init() to initialize MSR_TSC_AUX before it reads the value.
Per Avi's suggestion, this patch raises the vsyscall priority on the hotplug
notifier chain to 30.

CC: Ingo Molnar <***@elte.hu>
CC: linux-***@vger.kernel.org
Signed-off-by: Sheng Yang <***@linux.intel.com>
Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/kernel/vsyscall_64.c | 3 ++-
1 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9055e58..1c0c6ab 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -301,7 +301,8 @@ static int __init vsyscall_init(void)
register_sysctl_table(kernel_root_table2);
#endif
on_each_cpu(cpu_vsyscall_init, NULL, 1);
- hotcpu_notifier(cpu_vsyscall_notifier, 0);
+ /* notifier priority > KVM */
+ hotcpu_notifier(cpu_vsyscall_notifier, 30);
return 0;
}
--
1.6.5.3
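
CPU hotplug notifier callbacks run in descending priority order, so priority 30 makes the vsyscall callback run before KVM's own hotplug callback (registered at a lower priority), guaranteeing MSR_TSC_AUX is initialized before KVM touches it. A rough sketch of the registration pattern of that era follows; the callback body mirrors cpu_vsyscall_notifier() and the names are illustrative:

/* Sketch: era-appropriate hotplug notifier; higher priority runs earlier. */
static int example_vsyscall_notifier(struct notifier_block *n,
				     unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
	return NOTIFY_DONE;
}

static int __init example_register(void)
{
	/* 30 > the default 0 used by most notifiers, so this one runs first */
	hotcpu_notifier(example_vsyscall_notifier, 30);
	return 0;
}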
Avi Kivity
2010-02-10 17:22:33 UTC
From: Sheng Yang <***@linux.intel.com>

We don't support these instructions, but the guest can execute them even if
the 'monitor' feature hasn't been exposed in CPUID. So trap the instructions
and inject a #UD when the guest tries to use them.

Cc: ***@kernel.org
Signed-off-by: Sheng Yang <***@linux.intel.com>
Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/include/asm/vmx.h | 1 +
arch/x86/kvm/vmx.c | 10 ++++++++++
2 files changed, 11 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 2b49454..8f6b011 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -251,6 +251,7 @@ enum vmcs_field {
#define EXIT_REASON_MSR_READ 31
#define EXIT_REASON_MSR_WRITE 32
#define EXIT_REASON_MWAIT_INSTRUCTION 36
+#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION 40
#define EXIT_REASON_MCE_DURING_VMENTRY 41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d4918d6..8a8e139 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1224,6 +1224,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
CPU_BASED_USE_IO_BITMAPS |
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_USE_TSC_OFFSETING |
+ CPU_BASED_MWAIT_EXITING |
+ CPU_BASED_MONITOR_EXITING |
CPU_BASED_INVLPG_EXITING;
opt = CPU_BASED_TPR_SHADOW |
CPU_BASED_USE_MSR_BITMAPS |
@@ -3416,6 +3418,12 @@ static int handle_pause(struct kvm_vcpu *vcpu)
return 1;
}

+static int handle_invalid_op(struct kvm_vcpu *vcpu)
+{
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -3453,6 +3461,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
[EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
[EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
+ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
+ [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
};

static const int kvm_vmx_max_exit_handlers =
--
1.6.5.3
Avi Kivity
2010-02-10 17:22:32 UTC
In the past we've seen single-bit errors in the other two cases; the printk()
may help confirm whether the third case (many->many) suffers from the same
problem.

Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/kvm/mmu.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 89a49fb..4f499d7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -662,6 +662,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
prev_desc = desc;
desc = desc->more;
}
+ pr_err("rmap_remove: %p %llx many->many\n", spte, *spte);
BUG();
}
}
--
1.6.5.3
Avi Kivity
2010-02-10 17:23:05 UTC
From: Marcelo Tosatti <***@redhat.com>

Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/ia64/kvm/kvm-ia64.c | 4 ++--
arch/powerpc/kvm/book3s.c | 4 ++--
arch/x86/kvm/i8254.c | 2 +-
arch/x86/kvm/i8259.c | 4 ++--
arch/x86/kvm/vmx.c | 8 ++++----
arch/x86/kvm/x86.c | 16 ++++++++--------
include/linux/kvm_host.h | 2 +-
virt/kvm/coalesced_mmio.c | 14 +++++++-------
virt/kvm/eventfd.c | 10 +++++-----
virt/kvm/ioapic.c | 4 ++--
virt/kvm/kvm_main.c | 10 +++++-----
11 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index d5e3846..e6ac549 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1834,7 +1834,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot;
int is_dirty = 0;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
spin_lock(&kvm->arch.dirty_log_lock);

r = kvm_ia64_sync_dirty_log(kvm, log);
@@ -1854,7 +1854,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
}
r = 0;
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
spin_unlock(&kvm->arch.dirty_log_lock);
return r;
}
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index bb8873d..492dcc1 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -857,7 +857,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
int is_dirty = 0;
int r, n;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);

r = kvm_get_dirty_log(kvm, log, &is_dirty);
if (r)
@@ -879,7 +879,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,

r = 0;
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}

diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index a7259e8..caad189 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -602,7 +602,7 @@ static const struct kvm_io_device_ops speaker_dev_ops = {
.write = speaker_ioport_write,
};

-/* Caller must have writers lock on slots_lock */
+/* Caller must hold slots_lock */
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
struct kvm_pit *pit;
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index b7d145b..d5753a7 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -533,9 +533,9 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
* Initialize PIO device
*/
kvm_iodevice_init(&s->dev, &picdev_ops);
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
if (ret < 0) {
kfree(s);
return NULL;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 22ab713..f04e2ff 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2223,7 +2223,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
if (kvm->arch.apic_access_page)
goto out;
kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -2236,7 +2236,7 @@ static int alloc_apic_access_page(struct kvm *kvm)

kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}

@@ -2245,7 +2245,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
if (kvm->arch.ept_identity_pagetable)
goto out;
kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
@@ -2260,7 +2260,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0cb795c..4b00831 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2209,14 +2209,14 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
return -EINVAL;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
spin_lock(&kvm->mmu_lock);

kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

spin_unlock(&kvm->mmu_lock);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
}

@@ -2293,7 +2293,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
if (!aliases)
goto out;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);

/* invalidate any gfn reference in case of deletion/shrinking */
memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
@@ -2329,7 +2329,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
r = 0;

out_unlock:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
out:
return r;
}
@@ -2463,7 +2463,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
unsigned long is_dirty = 0;
unsigned long *dirty_bitmap = NULL;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);

r = -EINVAL;
if (log->slot >= KVM_MEMORY_SLOTS)
@@ -2513,7 +2513,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
out_free:
vfree(dirty_bitmap);
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}

@@ -2626,7 +2626,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
sizeof(struct kvm_pit_config)))
goto out;
create_pit:
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
@@ -2635,7 +2635,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (kvm->arch.vpit)
r = 0;
create_pit_unlock:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
break;
case KVM_IRQ_LINE_STATUS:
case KVM_IRQ_LINE: {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0bb9aa2..bb0314e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -161,7 +161,7 @@ struct kvm_memslots {
struct kvm {
spinlock_t mmu_lock;
spinlock_t requests_lock;
- struct rw_semaphore slots_lock;
+ struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots *memslots;
struct srcu_struct srcu;
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index a736a93..5de6594 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -110,9 +110,9 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
dev->kvm = kvm;
kvm->coalesced_mmio_dev = dev;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
if (ret < 0)
goto out_free_dev;

@@ -140,16 +140,16 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
if (dev == NULL)
return -EINVAL;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return -ENOBUFS;
}

dev->zone[dev->nb_zones] = *zone;
dev->nb_zones++;

- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
}

@@ -163,7 +163,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
if (dev == NULL)
return -EINVAL;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);

i = dev->nb_zones;
while(i) {
@@ -181,7 +181,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
i--;
}

- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);

return 0;
}
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 315a586..486c604 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -508,7 +508,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
else
p->wildcard = true;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);

/* Verify that there isnt a match already */
if (ioeventfd_check_collision(kvm, p)) {
@@ -524,12 +524,12 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)

list_add_tail(&p->list, &kvm->ioeventfds);

- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);

return 0;

unlock_fail:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);

fail:
kfree(p);
@@ -551,7 +551,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);

list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
@@ -571,7 +571,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
break;
}

- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);

eventfd_ctx_put(eventfd);

diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index f326a6f..f01392f 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -372,9 +372,9 @@ int kvm_ioapic_init(struct kvm *kvm)
kvm_ioapic_reset(ioapic);
kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
ioapic->kvm = kvm;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
if (ret < 0)
kfree(ioapic);

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 659bc12..2b7cd6c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -429,7 +429,7 @@ static struct kvm *kvm_create_vm(void)
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
- init_rwsem(&kvm->slots_lock);
+ mutex_init(&kvm->slots_lock);
atomic_set(&kvm->users_count, 1);
spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
@@ -763,9 +763,9 @@ int kvm_set_memory_region(struct kvm *kvm,
{
int r;

- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
r = __kvm_set_memory_region(kvm, mem, user_alloc);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
@@ -1997,7 +1997,7 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
return -EOPNOTSUPP;
}

-/* Caller must have write lock on slots_lock. */
+/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{
@@ -2019,7 +2019,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
return 0;
}

-/* Caller must have write lock on slots_lock. */
+/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{
--
1.6.5.3
Avi Kivity
2010-02-10 17:23:04 UTC
From: Marcelo Tosatti <***@redhat.com>

Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/ia64/kvm/kvm-ia64.c | 15 ++++++---------
arch/s390/kvm/kvm-s390.h | 10 +++++++---
arch/x86/kvm/mmu.c | 7 +++----
arch/x86/kvm/vmx.c | 6 +++---
arch/x86/kvm/x86.c | 43 ++++++++++++++++++++++++-------------------
include/linux/kvm_host.h | 2 ++
6 files changed, 45 insertions(+), 38 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index d0ad538..d5e3846 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -636,12 +636,9 @@ static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
union context *host_ctx, *guest_ctx;
- int r;
+ int r, idx;

- /*
- * down_read() may sleep and return with interrupts enabled
- */
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);

again:
if (signal_pending(current)) {
@@ -663,7 +660,7 @@ again:
if (r < 0)
goto vcpu_run_fail;

- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm_guest_enter();

/*
@@ -687,7 +684,7 @@ again:
kvm_guest_exit();
preempt_enable();

- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);

r = kvm_handle_exit(kvm_run, vcpu);

@@ -697,10 +694,10 @@ again:
}

out:
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (r > 0) {
kvm_resched(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
goto again;
}

diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 06cce82..60f09ab 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -67,10 +67,14 @@ static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)

static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
{
+ int idx;
struct kvm_memory_slot *mem;
+ struct kvm_memslots *memslots;

- down_read(&vcpu->kvm->slots_lock);
- mem = &vcpu->kvm->memslots[0];
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ memslots = rcu_dereference(vcpu->kvm->memslots);
+
+ mem = &memslots->memslots[0];

vcpu->arch.sie_block->gmsor = mem->userspace_addr;
vcpu->arch.sie_block->gmslm =
@@ -78,7 +82,7 @@ static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
(mem->npages << PAGE_SHIFT) +
VIRTIODESCSPACE - 1ul;

- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
}

/* implemented in priv.c */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f8bf42a..25aabd0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2933,10 +2933,9 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
spin_lock(&kvm_lock);

list_for_each_entry(kvm, &vm_list, vm_list) {
- int npages;
+ int npages, idx;

- if (!down_read_trylock(&kvm->slots_lock))
- continue;
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
npages = kvm->arch.n_alloc_mmu_pages -
kvm->arch.n_free_mmu_pages;
@@ -2949,7 +2948,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
nr_to_scan--;

spin_unlock(&kvm->mmu_lock);
- up_read(&kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
}
if (kvm_freed)
list_move_tail(&kvm_freed->vm_list, &vm_list);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f1cae7d..22ab713 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2478,10 +2478,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 msr;
- int ret;
+ int ret, idx;

vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
if (!init_rmode(vmx->vcpu.kvm)) {
ret = -ENOMEM;
goto out;
@@ -2589,7 +2589,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->emulation_required = 0;

out:
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
return ret;
}

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b9adeae..0cb795c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1307,15 +1307,15 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data))
{
- int i;
+ int i, idx;

vcpu_load(vcpu);

- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
for (i = 0; i < msrs->nmsrs; ++i)
if (do_msr(vcpu, entries[i].index, &entries[i].data))
break;
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);

vcpu_put(vcpu);

@@ -3901,14 +3901,15 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
static void vapic_exit(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ int idx;

if (!apic || !apic->vapic_addr)
return;

- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_release_page_dirty(apic->vapic_page);
mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -4037,7 +4038,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_lapic_sync_to_vapic(vcpu);
}

- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

kvm_guest_enter();

@@ -4079,7 +4080,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)

preempt_enable();

- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

/*
* Profile KVM exit RIPs:
@@ -4101,6 +4102,7 @@ out:
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
+ struct kvm *kvm = vcpu->kvm;

if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
pr_debug("vcpu %d received sipi with vector # %x\n",
@@ -4112,7 +4114,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}

- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
vapic_enter(vcpu);

r = 1;
@@ -4120,9 +4122,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
r = vcpu_enter_guest(vcpu);
else {
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
{
switch(vcpu->arch.mp_state) {
@@ -4157,13 +4159,13 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
++vcpu->stat.signal_exits;
}
if (need_resched()) {
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_resched(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
}
}

- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
post_kvm_run_save(vcpu);

vapic_exit(vcpu);
@@ -4202,10 +4204,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->mmio_read_completed = 1;
vcpu->mmio_needed = 0;

- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
EMULTYPE_NO_DECODE);
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r == EMULATE_DO_MMIO) {
/*
* Read-modify-write. Back to userspace.
@@ -4968,11 +4970,12 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
{
unsigned long vaddr = tr->linear_address;
gpa_t gpa;
+ int idx;

vcpu_load(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
tr->physical_address = gpa;
tr->valid = gpa != UNMAPPED_GVA;
tr->writeable = 1;
@@ -5224,11 +5227,13 @@ fail:

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
+ int idx;
+
kfree(vcpu->arch.mce_banks);
kvm_free_lapic(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_mmu_destroy(vcpu);
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
free_page((unsigned long)vcpu->arch.pio_data);
}

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5e9cb90..0bb9aa2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -83,6 +83,8 @@ struct kvm_vcpu {
struct kvm_run *run;
unsigned long requests;
unsigned long guest_debug;
+ int srcu_idx;
+
int fpu_active;
int guest_fpu_loaded;
wait_queue_head_t wq;
--
1.6.5.3
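
The read side of slots_lock is replaced by SRCU here: readers grab an index with srcu_read_lock(), dereference the published memslots pointer, and must pass the same index back to srcu_read_unlock(). A minimal sketch of the pattern, assuming a struct kvm pointer as in the patch (example_slot_base() is a hypothetical helper):

/* Sketch: SRCU read-side section replacing down_read(&kvm->slots_lock). */
static unsigned long example_slot_base(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	unsigned long base;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);	/* begin read-side section */
	slots = rcu_dereference(kvm->memslots);	/* snapshot the published pointer */
	base = slots->memslots[0].base_gfn;
	srcu_read_unlock(&kvm->srcu, idx);	/* must return the same idx */
	return base;
}

Updaters publish a new memslots pointer and then wait out existing readers with synchronize_srcu() (or its expedited variant), so readers never block on a lock.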
Avi Kivity
2010-02-10 17:23:03 UTC
From: Marcelo Tosatti <***@redhat.com>

Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/ia64/kvm/kvm-ia64.c | 4 +-
arch/x86/kvm/i8254.c | 6 +-
arch/x86/kvm/i8259.c | 4 +-
arch/x86/kvm/x86.c | 13 +++---
include/linux/kvm_host.h | 27 ++++++------
virt/kvm/coalesced_mmio.c | 4 +-
virt/kvm/eventfd.c | 8 ++--
virt/kvm/ioapic.c | 4 +-
virt/kvm/kvm_main.c | 106 ++++++++++++++++++++++++++------------------
9 files changed, 101 insertions(+), 75 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index b2e4d16..d0ad538 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -241,10 +241,10 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 0;
mmio:
if (p->dir)
- r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
+ r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
p->size, &p->data);
else
- r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
+ r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
p->size, &p->data);
if (r)
printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 296aba4..a7259e8 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -642,13 +642,13 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);

kvm_iodevice_init(&pit->dev, &pit_dev_ops);
- ret = __kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
+ ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &pit->dev);
if (ret < 0)
goto fail;

if (flags & KVM_PIT_SPEAKER_DUMMY) {
kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
- ret = __kvm_io_bus_register_dev(&kvm->pio_bus,
+ ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
&pit->speaker_dev);
if (ret < 0)
goto fail_unregister;
@@ -657,7 +657,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
return pit;

fail_unregister:
- __kvm_io_bus_unregister_dev(&kvm->pio_bus, &pit->dev);
+ kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);

fail:
if (pit->irq_source_id >= 0)
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index d057c0c..b7d145b 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -533,7 +533,9 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
* Initialize PIO device
*/
kvm_iodevice_init(&s->dev, &picdev_ops);
- ret = kvm_io_bus_register_dev(kvm, &kvm->pio_bus, &s->dev);
+ down_write(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev);
+ up_write(&kvm->slots_lock);
if (ret < 0) {
kfree(s);
return NULL;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59f4da4..b9adeae 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2852,7 +2852,7 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
!kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
return 0;

- return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v);
+ return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}

static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
@@ -2861,7 +2861,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
!kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
return 0;

- return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
+ return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}

static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
@@ -3346,11 +3346,12 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
int r;

if (vcpu->arch.pio.in)
- r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
+ r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
- r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
- vcpu->arch.pio.size, pd);
+ r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
+ vcpu->arch.pio.port, vcpu->arch.pio.size,
+ pd);
return r;
}

@@ -3361,7 +3362,7 @@ static int pio_string_write(struct kvm_vcpu *vcpu)
int i, r = 0;

for (i = 0; i < io->cur_count; i++) {
- if (kvm_io_bus_write(&vcpu->kvm->pio_bus,
+ if (kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
io->port, io->size, pd)) {
r = -EOPNOTSUPP;
break;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 20941c0..5e9cb90 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -57,20 +57,20 @@ struct kvm_io_bus {
struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

-void kvm_io_bus_init(struct kvm_io_bus *bus);
-void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
- const void *val);
-int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
+enum kvm_bus {
+ KVM_MMIO_BUS,
+ KVM_PIO_BUS,
+ KVM_NR_BUSES
+};
+
+int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, const void *val);
+int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
void *val);
-int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev);
-int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev);
-void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus,
- struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);

struct kvm_vcpu {
struct kvm *kvm;
@@ -171,8 +171,7 @@ struct kvm {
atomic_t online_vcpus;
struct list_head vm_list;
struct mutex lock;
- struct kvm_io_bus mmio_bus;
- struct kvm_io_bus pio_bus;
+ struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
struct {
spinlock_t lock;
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index d68e6c6..a736a93 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -110,7 +110,9 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
dev->kvm = kvm;
kvm->coalesced_mmio_dev = dev;

- ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev);
+ down_write(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+ up_write(&kvm->slots_lock);
if (ret < 0)
goto out_free_dev;

diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a9d3fc6..315a586 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -463,7 +463,7 @@ static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
- struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus;
+ enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
struct _ioeventfd *p;
struct eventfd_ctx *eventfd;
int ret;
@@ -518,7 +518,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)

kvm_iodevice_init(&p->dev, &ioeventfd_ops);

- ret = __kvm_io_bus_register_dev(bus, &p->dev);
+ ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
if (ret < 0)
goto unlock_fail;

@@ -542,7 +542,7 @@ static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
- struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus;
+ enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
struct _ioeventfd *p, *tmp;
struct eventfd_ctx *eventfd;
int ret = -ENOENT;
@@ -565,7 +565,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
if (!p->wildcard && p->datamatch != args->datamatch)
continue;

- __kvm_io_bus_unregister_dev(bus, &p->dev);
+ kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
ioeventfd_release(p);
ret = 0;
break;
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 38a2d20..f326a6f 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -372,7 +372,9 @@ int kvm_ioapic_init(struct kvm *kvm)
kvm_ioapic_reset(ioapic);
kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
ioapic->kvm = kvm;
- ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &ioapic->dev);
+ down_write(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
+ up_write(&kvm->slots_lock);
if (ret < 0)
kfree(ioapic);

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c680f7b..659bc12 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -85,6 +85,8 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
static int hardware_enable_all(void);
static void hardware_disable_all(void);

+static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+
static bool kvm_rebooting;

static bool largepages_enabled = true;
@@ -367,7 +369,7 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)

static struct kvm *kvm_create_vm(void)
{
- int r = 0;
+ int r = 0, i;
struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct page *page;
@@ -391,6 +393,14 @@ static struct kvm *kvm_create_vm(void)
goto out_err;
if (init_srcu_struct(&kvm->srcu))
goto out_err;
+ for (i = 0; i < KVM_NR_BUSES; i++) {
+ kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
+ GFP_KERNEL);
+ if (!kvm->buses[i]) {
+ cleanup_srcu_struct(&kvm->srcu);
+ goto out_err;
+ }
+ }

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -416,11 +426,9 @@ static struct kvm *kvm_create_vm(void)
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
spin_lock_init(&kvm->requests_lock);
- kvm_io_bus_init(&kvm->pio_bus);
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
- kvm_io_bus_init(&kvm->mmio_bus);
init_rwsem(&kvm->slots_lock);
atomic_set(&kvm->users_count, 1);
spin_lock(&kvm_lock);
@@ -435,6 +443,8 @@ out:
out_err:
hardware_disable_all();
out_err_nodisable:
+ for (i = 0; i < KVM_NR_BUSES; i++)
+ kfree(kvm->buses[i]);
kfree(kvm->memslots);
kfree(kvm);
return ERR_PTR(r);
@@ -480,6 +490,7 @@ void kvm_free_physmem(struct kvm *kvm)

static void kvm_destroy_vm(struct kvm *kvm)
{
+ int i;
struct mm_struct *mm = kvm->mm;

kvm_arch_sync_events(kvm);
@@ -487,8 +498,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
- kvm_io_bus_destroy(&kvm->pio_bus);
- kvm_io_bus_destroy(&kvm->mmio_bus);
+ for (i = 0; i < KVM_NR_BUSES; i++)
+ kvm_io_bus_destroy(kvm->buses[i]);
kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -1949,12 +1960,7 @@ static struct notifier_block kvm_reboot_notifier = {
.priority = 0,
};

-void kvm_io_bus_init(struct kvm_io_bus *bus)
-{
- memset(bus, 0, sizeof(*bus));
-}
-
-void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
int i;

@@ -1963,13 +1969,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus)

kvm_iodevice_destructor(pos);
}
+ kfree(bus);
}

/* kvm_io_bus_write - called under kvm->slots_lock */
-int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
+int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
int i;
+ struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
for (i = 0; i < bus->dev_count; i++)
if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
return 0;
@@ -1977,59 +1985,71 @@ int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
}

/* kvm_io_bus_read - called under kvm->slots_lock */
-int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
+int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, void *val)
{
int i;
+ struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
+
for (i = 0; i < bus->dev_count; i++)
if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
return 0;
return -EOPNOTSUPP;
}

-int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
+/* Caller must have write lock on slots_lock. */
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev)
{
- int ret;
-
- down_write(&kvm->slots_lock);
- ret = __kvm_io_bus_register_dev(bus, dev);
- up_write(&kvm->slots_lock);
+ struct kvm_io_bus *new_bus, *bus;

- return ret;
-}
-
-/* An unlocked version. Caller must have write lock on slots_lock. */
-int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
-{
+ bus = kvm->buses[bus_idx];
if (bus->dev_count > NR_IOBUS_DEVS-1)
return -ENOSPC;

- bus->devs[bus->dev_count++] = dev;
+ new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+ if (!new_bus)
+ return -ENOMEM;
+ memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
+ new_bus->devs[new_bus->dev_count++] = dev;
+ rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(bus);

return 0;
}

-void kvm_io_bus_unregister_dev(struct kvm *kvm,
- struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
+/* Caller must have write lock on slots_lock. */
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev)
{
- down_write(&kvm->slots_lock);
- __kvm_io_bus_unregister_dev(bus, dev);
- up_write(&kvm->slots_lock);
-}
+ int i, r;
+ struct kvm_io_bus *new_bus, *bus;

-/* An unlocked version. Caller must have write lock on slots_lock. */
-void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
-{
- int i;
+ new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+ if (!new_bus)
+ return -ENOMEM;

- for (i = 0; i < bus->dev_count; i++)
- if (bus->devs[i] == dev) {
- bus->devs[i] = bus->devs[--bus->dev_count];
+ bus = kvm->buses[bus_idx];
+ memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
+
+ r = -ENOENT;
+ for (i = 0; i < new_bus->dev_count; i++)
+ if (new_bus->devs[i] == dev) {
+ r = 0;
+ new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
break;
}
+
+ if (r) {
+ kfree(new_bus);
+ return r;
+ }
+
+ rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(bus);
+ return r;
}

static struct notifier_block kvm_cpu_notifier = {
--
1.6.5.3
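
Bus registration now follows the read-copy-update recipe: with slots_lock held, copy the current kvm_io_bus, modify the copy, publish it with rcu_assign_pointer(), wait for in-flight readers with synchronize_srcu_expedited(), and only then free the old bus. A condensed sketch of the writer side with error paths trimmed (example_bus_add() is an illustrative name, not the patch's function):

/* Sketch: publish a modified copy of an RCU-protected I/O bus. */
static int example_bus_add(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *old_bus = kvm->buses[bus_idx];

	new_bus = kzalloc(sizeof(*new_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, old_bus, sizeof(*new_bus));	  /* copy ...       */
	new_bus->devs[new_bus->dev_count++] = dev;	  /* ... update ... */
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus); /* ... publish    */
	synchronize_srcu_expedited(&kvm->srcu);	/* wait out current readers */
	kfree(old_bus);				/* old copy is now unreachable */
	return 0;
}

Readers in kvm_io_bus_read()/kvm_io_bus_write() therefore only ever see either the old or the new device array, never a partially updated one.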
Avi Kivity
2010-02-10 17:22:30 UTC
From: Eddie Dong <***@intel.com>

Move the double-fault generation logic out of the page-fault exception
generating function so it covers the more general case of any two pending
exceptions.

Signed-off-by: Eddie Dong <***@intel.com>
Signed-off-by: Gleb Natapov <***@redhat.com>
Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/x86/kvm/x86.c | 89 +++++++++++++++++++++++++++++++++++----------------
1 files changed, 61 insertions(+), 28 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1ddcad4..b503614 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -257,12 +257,68 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

+#define EXCPT_BENIGN 0
+#define EXCPT_CONTRIBUTORY 1
+#define EXCPT_PF 2
+
+static int exception_class(int vector)
+{
+ switch (vector) {
+ case PF_VECTOR:
+ return EXCPT_PF;
+ case DE_VECTOR:
+ case TS_VECTOR:
+ case NP_VECTOR:
+ case SS_VECTOR:
+ case GP_VECTOR:
+ return EXCPT_CONTRIBUTORY;
+ default:
+ break;
+ }
+ return EXCPT_BENIGN;
+}
+
+static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
+ unsigned nr, bool has_error, u32 error_code)
+{
+ u32 prev_nr;
+ int class1, class2;
+
+ if (!vcpu->arch.exception.pending) {
+ queue:
+ vcpu->arch.exception.pending = true;
+ vcpu->arch.exception.has_error_code = has_error;
+ vcpu->arch.exception.nr = nr;
+ vcpu->arch.exception.error_code = error_code;
+ return;
+ }
+
+ /* to check exception */
+ prev_nr = vcpu->arch.exception.nr;
+ if (prev_nr == DF_VECTOR) {
+ /* triple fault -> shutdown */
+ set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+ return;
+ }
+ class1 = exception_class(prev_nr);
+ class2 = exception_class(nr);
+ if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
+ || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
+ /* generate double fault per SDM Table 5-5 */
+ vcpu->arch.exception.pending = true;
+ vcpu->arch.exception.has_error_code = true;
+ vcpu->arch.exception.nr = DF_VECTOR;
+ vcpu->arch.exception.error_code = 0;
+ } else
+ /* replace previous exception with a new one in a hope
+ that instruction re-execution will regenerate lost
+ exception */
+ goto queue;
+}
+
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
- WARN_ON(vcpu->arch.exception.pending);
- vcpu->arch.exception.pending = true;
- vcpu->arch.exception.has_error_code = false;
- vcpu->arch.exception.nr = nr;
+ kvm_multiple_exception(vcpu, nr, false, 0);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

@@ -270,25 +326,6 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
u32 error_code)
{
++vcpu->stat.pf_guest;
-
- if (vcpu->arch.exception.pending) {
- switch(vcpu->arch.exception.nr) {
- case DF_VECTOR:
- /* triple fault -> shutdown */
- set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
- return;
- case PF_VECTOR:
- vcpu->arch.exception.nr = DF_VECTOR;
- vcpu->arch.exception.error_code = 0;
- return;
- default:
- /* replace previous exception with a new one in a hope
- that instruction re-execution will regenerate lost
- exception */
- vcpu->arch.exception.pending = false;
- break;
- }
- }
vcpu->arch.cr2 = addr;
kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}
@@ -301,11 +338,7 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
- WARN_ON(vcpu->arch.exception.pending);
- vcpu->arch.exception.pending = true;
- vcpu->arch.exception.has_error_code = true;
- vcpu->arch.exception.nr = nr;
- vcpu->arch.exception.error_code = error_code;
+ kvm_multiple_exception(vcpu, nr, true, error_code);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
--
1.6.5.3
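
The contributory/benign classification implements the SDM's double-fault rules (Table 5-5 in the edition the patch cites): two contributory exceptions, or a page fault followed by anything that is not benign, escalate to #DF, while a pending #DF followed by anything becomes a triple fault. A small self-contained sketch of the decision table for illustration (not the KVM function itself):

#include <assert.h>

enum { EXCPT_BENIGN, EXCPT_CONTRIBUTORY, EXCPT_PF };

static int exception_class(int vector)
{
	switch (vector) {
	case 14:					/* #PF */
		return EXCPT_PF;
	case 0: case 10: case 11: case 12: case 13:	/* #DE #TS #NP #SS #GP */
		return EXCPT_CONTRIBUTORY;
	default:
		return EXCPT_BENIGN;
	}
}

/* Returns 1 if delivering 'second' while 'first' is pending must raise #DF. */
static int causes_double_fault(int first, int second)
{
	int c1 = exception_class(first), c2 = exception_class(second);

	return (c1 == EXCPT_CONTRIBUTORY && c2 == EXCPT_CONTRIBUTORY) ||
	       (c1 == EXCPT_PF && c2 != EXCPT_BENIGN);
}

int main(void)
{
	assert(causes_double_fault(13, 13));	/* #GP then #GP -> #DF */
	assert(causes_double_fault(14, 13));	/* #PF then #GP -> #DF */
	assert(causes_double_fault(14, 14));	/* #PF then #PF -> #DF */
	assert(!causes_double_fault(1, 13));	/* #DB then #GP: just deliver #GP */
	assert(!causes_double_fault(13, 14));	/* #GP then #PF: nested #PF, no #DF */
	return 0;
}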
Avi Kivity
2010-02-10 17:22:59 UTC
From: Marcelo Tosatti <***@redhat.com>

This makes it possible to iommu-map a memslot before making it visible to
kvm.

Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
include/linux/kvm_host.h | 3 +--
virt/kvm/iommu.c | 13 ++++++-------
virt/kvm/kvm_main.c | 2 +-
3 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f1f78de..9af2403 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -440,8 +440,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
#define KVM_IOMMU_CACHE_COHERENCY 0x1

#ifdef CONFIG_IOMMU_API
-int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
- unsigned long npages);
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index bc697a6..cf567d8 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -32,10 +32,10 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
gfn_t base_gfn, unsigned long npages);

-int kvm_iommu_map_pages(struct kvm *kvm,
- gfn_t base_gfn, unsigned long npages)
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
- gfn_t gfn = base_gfn;
+ gfn_t gfn = slot->base_gfn;
+ unsigned long npages = slot->npages;
pfn_t pfn;
int i, r = 0;
struct iommu_domain *domain = kvm->arch.iommu_domain;
@@ -54,7 +54,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
continue;

- pfn = gfn_to_pfn(kvm, gfn);
+ pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
r = iommu_map_range(domain,
gfn_to_gpa(gfn),
pfn_to_hpa(pfn),
@@ -69,7 +69,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
return 0;

unmap_pages:
- kvm_iommu_put_pages(kvm, base_gfn, i);
+ kvm_iommu_put_pages(kvm, slot->base_gfn, i);
return r;
}

@@ -81,8 +81,7 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
slots = kvm->memslots;

for (i = 0; i < slots->nmemslots; i++) {
- r = kvm_iommu_map_pages(kvm, slots->memslots[i].base_gfn,
- slots->memslots[i].npages);
+ r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
if (r)
break;
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4e2321c..87d296d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -684,7 +684,7 @@ skip_lpage:
spin_unlock(&kvm->mmu_lock);
#ifdef CONFIG_DMAR
/* map the pages in iommu page table */
- r = kvm_iommu_map_pages(kvm, base_gfn, npages);
+ r = kvm_iommu_map_pages(kvm, memslot);
if (r)
goto out;
#endif
--
1.6.5.3
Avi Kivity
2010-02-10 17:23:02 UTC
From: Marcelo Tosatti <***@redhat.com>

Convert the memory alias structures to RCU, using a two-step update
procedure similar to the one used for memslots.

Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 4 ++
arch/x86/kvm/x86.c | 60 +++++++++++++++++++++++++++++++++------
include/linux/kvm_host.h | 6 ++++
virt/kvm/kvm_main.c | 4 +-
4 files changed, 63 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7cdcb3d..6c8c7c5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -368,8 +368,12 @@ struct kvm_mem_alias {
gfn_t base_gfn;
unsigned long npages;
gfn_t target_gfn;
+#define KVM_ALIAS_INVALID 1UL
+ unsigned long flags;
};

+#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION
+
struct kvm_mem_aliases {
struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
int naliases;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2d7c040..59f4da4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -38,6 +38,7 @@
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
+#include <linux/srcu.h>
#include <trace/events/kvm.h>
#undef TRACE_INCLUDE_FILE
#define CREATE_TRACE_POINTS
@@ -2224,11 +2225,32 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
return kvm->arch.n_alloc_mmu_pages;
}

+gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
+{
+ int i;
+ struct kvm_mem_alias *alias;
+ struct kvm_mem_aliases *aliases;
+
+ aliases = rcu_dereference(kvm->arch.aliases);
+
+ for (i = 0; i < aliases->naliases; ++i) {
+ alias = &aliases->aliases[i];
+ if (alias->flags & KVM_ALIAS_INVALID)
+ continue;
+ if (gfn >= alias->base_gfn
+ && gfn < alias->base_gfn + alias->npages)
+ return alias->target_gfn + gfn - alias->base_gfn;
+ }
+ return gfn;
+}
+
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
struct kvm_mem_alias *alias;
- struct kvm_mem_aliases *aliases = kvm->arch.aliases;
+ struct kvm_mem_aliases *aliases;
+
+ aliases = rcu_dereference(kvm->arch.aliases);

for (i = 0; i < aliases->naliases; ++i) {
alias = &aliases->aliases[i];
@@ -2249,7 +2271,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
{
int r, n;
struct kvm_mem_alias *p;
- struct kvm_mem_aliases *aliases;
+ struct kvm_mem_aliases *aliases, *old_aliases;

r = -EINVAL;
/* General sanity checks */
@@ -2266,28 +2288,48 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
< alias->target_phys_addr)
goto out;

+ r = -ENOMEM;
+ aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+ if (!aliases)
+ goto out;
+
down_write(&kvm->slots_lock);
- spin_lock(&kvm->mmu_lock);

- aliases = kvm->arch.aliases;
+ /* invalidate any gfn reference in case of deletion/shrinking */
+ memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
+ aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
+ old_aliases = kvm->arch.aliases;
+ rcu_assign_pointer(kvm->arch.aliases, aliases);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kvm_mmu_zap_all(kvm);
+ kfree(old_aliases);
+
+ r = -ENOMEM;
+ aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+ if (!aliases)
+ goto out_unlock;
+
+ memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));

p = &aliases->aliases[alias->slot];
p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
p->npages = alias->memory_size >> PAGE_SHIFT;
p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
+ p->flags &= ~(KVM_ALIAS_INVALID);

for (n = KVM_ALIAS_SLOTS; n > 0; --n)
if (aliases->aliases[n - 1].npages)
break;
aliases->naliases = n;

- spin_unlock(&kvm->mmu_lock);
- kvm_mmu_zap_all(kvm);
+ old_aliases = kvm->arch.aliases;
+ rcu_assign_pointer(kvm->arch.aliases, aliases);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(old_aliases);
+ r = 0;

+out_unlock:
up_write(&kvm->slots_lock);
-
- return 0;
-
out:
return r;
}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 93bd307..20941c0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -266,6 +266,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
+gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn);
+
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
@@ -539,6 +541,10 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
}
#endif

+#ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION
+#define unalias_gfn_instantiation unalias_gfn
+#endif
+
#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2bb24a8..c680f7b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -859,7 +859,7 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
int i;
struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

- gfn = unalias_gfn(kvm, gfn);
+ gfn = unalias_gfn_instantiation(kvm, gfn);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *memslot = &slots->memslots[i];

@@ -896,7 +896,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *slot;

- gfn = unalias_gfn(kvm, gfn);
+ gfn = unalias_gfn_instantiation(kvm, gfn);
slot = gfn_to_memslot_unaliased(kvm, gfn);
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return bad_hva();
--
1.6.5.3
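
The alias table is updated in two RCU phases: first publish a copy with the affected alias flagged KVM_ALIAS_INVALID, synchronize SRCU and zap the shadow MMU so no translation derived from the stale alias survives, then publish the final table with the new contents. A compressed sketch of that sequence, error handling trimmed and names hypothetical:

/* Sketch: two-step RCU replacement of kvm->arch.aliases (details as in the patch). */
static int example_set_alias(struct kvm *kvm, int slot,
			     const struct kvm_mem_alias *new_alias)
{
	struct kvm_mem_aliases *tmp, *old;

	/* Step 1: publish a copy with the slot marked invalid. */
	tmp = kmemdup(kvm->arch.aliases, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->aliases[slot].flags |= KVM_ALIAS_INVALID;
	old = kvm->arch.aliases;
	rcu_assign_pointer(kvm->arch.aliases, tmp);
	synchronize_srcu_expedited(&kvm->srcu);	/* readers now skip the alias */
	kvm_mmu_zap_all(kvm);			/* drop shadow entries built on it */
	kfree(old);

	/* Step 2: publish the final table with the new alias installed. */
	tmp = kmemdup(kvm->arch.aliases, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->aliases[slot] = *new_alias;
	tmp->aliases[slot].flags &= ~KVM_ALIAS_INVALID;
	old = kvm->arch.aliases;
	rcu_assign_pointer(kvm->arch.aliases, tmp);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(old);
	return 0;
}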

Avi Kivity
2010-02-10 17:22:55 UTC
From: Marcelo Tosatti <***@redhat.com>

Have struct kvm hold a pointer to a separately allocated kvm_memslots
structure instead of embedding the slot array directly.

[alex: fix ppc book 3s]

Signed-off-by: Alexander Graf <***@suse.de>
Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/ia64/kvm/kvm-ia64.c | 10 ++++++----
arch/powerpc/kvm/book3s.c | 2 +-
arch/x86/kvm/mmu.c | 11 ++++++-----
arch/x86/kvm/vmx.c | 4 ++--
arch/x86/kvm/x86.c | 4 ++--
include/linux/kvm_host.h | 12 ++++++++----
virt/kvm/iommu.c | 18 ++++++++++++------
virt/kvm/kvm_main.c | 36 +++++++++++++++++++++++-------------
8 files changed, 60 insertions(+), 37 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 5fdeec5..1ca1dbf 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1377,12 +1377,14 @@ static void free_kvm(struct kvm *kvm)

static void kvm_release_vm_pages(struct kvm *kvm)
{
+ struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int i, j;
unsigned long base_gfn;

- for (i = 0; i < kvm->nmemslots; i++) {
- memslot = &kvm->memslots[i];
+ slots = kvm->memslots;
+ for (i = 0; i < slots->nmemslots; i++) {
+ memslot = &slots->memslots[i];
base_gfn = memslot->base_gfn;

for (j = 0; j < memslot->npages; j++) {
@@ -1802,7 +1804,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;

- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
@@ -1840,7 +1842,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
kvm_flush_remote_tlbs(kvm);
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
memset(memslot->dirty_bitmap, 0, n);
}
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 803505d..bb8873d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -865,7 +865,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,

/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];

ga = memslot->base_gfn << PAGE_SHIFT;
ga_end = ga + (memslot->npages << PAGE_SHIFT);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4f499d7..81f84d3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -807,13 +807,14 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
{
int i, j;
int retval = 0;
+ struct kvm_memslots *slots = kvm->memslots;

/*
* If mmap_sem isn't taken, we can look the memslots with only
* the mmu_lock by skipping over the slots with userspace_addr == 0.
*/
- for (i = 0; i < kvm->nmemslots; i++) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ for (i = 0; i < slots->nmemslots; i++) {
+ struct kvm_memory_slot *memslot = &slots->memslots[i];
unsigned long start = memslot->userspace_addr;
unsigned long end;

@@ -3021,8 +3022,8 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
unsigned int nr_mmu_pages;
unsigned int nr_pages = 0;

- for (i = 0; i < kvm->nmemslots; i++)
- nr_pages += kvm->memslots[i].npages;
+ for (i = 0; i < kvm->memslots->nmemslots; i++)
+ nr_pages += kvm->memslots->memslots[i].npages;

nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
nr_mmu_pages = max(nr_mmu_pages,
@@ -3295,7 +3296,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
int i, j, k;

for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
+ struct kvm_memory_slot *m = &vcpu->kvm->memslots->memslots[i];
struct kvm_rmap_desc *d;

for (j = 0; j < m->npages; ++j) {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 74a66f0..1869879 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1503,8 +1503,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
static gva_t rmode_tss_base(struct kvm *kvm)
{
if (!kvm->arch.tss_addr) {
- gfn_t base_gfn = kvm->memslots[0].base_gfn +
- kvm->memslots[0].npages - 3;
+ gfn_t base_gfn = kvm->memslots->memslots[0].base_gfn +
+ kvm->memslots->memslots[0].npages - 3;
return base_gfn << PAGE_SHIFT;
}
return kvm->arch.tss_addr;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1bdcf26..f8e2712 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2428,7 +2428,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
spin_lock(&kvm->mmu_lock);
kvm_mmu_slot_remove_write_access(kvm, log->slot);
spin_unlock(&kvm->mmu_lock);
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
memset(memslot->dirty_bitmap, 0, n);
}
@@ -5224,7 +5224,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
int user_alloc)
{
int npages = mem->memory_size >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+ struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];

/*To keep backward compatibility with older userspace,
*x86 needs to hanlde !user_alloc case.
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bd5a616..782bfb1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -150,14 +150,18 @@ struct kvm_irq_routing_table {};

#endif

+struct kvm_memslots {
+ int nmemslots;
+ struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
+ KVM_PRIVATE_MEM_SLOTS];
+};
+
struct kvm {
spinlock_t mmu_lock;
spinlock_t requests_lock;
struct rw_semaphore slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- int nmemslots;
- struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
- KVM_PRIVATE_MEM_SLOTS];
+ struct kvm_memslots *memslots;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
u32 bsp_vcpu_id;
struct kvm_vcpu *bsp_vcpu;
@@ -482,7 +486,7 @@ static inline void kvm_guest_exit(void)

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
- return slot - kvm->memslots;
+ return slot - kvm->memslots->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 1514758..bc697a6 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -76,10 +76,13 @@ unmap_pages:
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
int i, r = 0;
+ struct kvm_memslots *slots;

- for (i = 0; i < kvm->nmemslots; i++) {
- r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn,
- kvm->memslots[i].npages);
+ slots = kvm->memslots;
+
+ for (i = 0; i < slots->nmemslots; i++) {
+ r = kvm_iommu_map_pages(kvm, slots->memslots[i].base_gfn,
+ slots->memslots[i].npages);
if (r)
break;
}
@@ -210,10 +213,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
int i;
+ struct kvm_memslots *slots;
+
+ slots = kvm->memslots;

- for (i = 0; i < kvm->nmemslots; i++) {
- kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn,
- kvm->memslots[i].npages);
+ for (i = 0; i < slots->nmemslots; i++) {
+ kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
+ slots->memslots[i].npages);
}

return 0;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bc23b8e..86dd8f3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -375,12 +375,16 @@ static struct kvm *kvm_create_vm(void)
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

+ r = -ENOMEM;
+ kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!kvm->memslots)
+ goto out_err;
+
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page) {
- r = -ENOMEM;
+ if (!page)
goto out_err;
- }
+
kvm->coalesced_mmio_ring =
(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif
@@ -416,6 +420,7 @@ out:
out_err:
hardware_disable_all();
out_err_nodisable:
+ kfree(kvm->memslots);
kfree(kvm);
return ERR_PTR(r);
}
@@ -450,9 +455,12 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
void kvm_free_physmem(struct kvm *kvm)
{
int i;
+ struct kvm_memslots *slots = kvm->memslots;
+
+ for (i = 0; i < slots->nmemslots; ++i)
+ kvm_free_physmem_slot(&slots->memslots[i], NULL);

- for (i = 0; i < kvm->nmemslots; ++i)
- kvm_free_physmem_slot(&kvm->memslots[i], NULL);
+ kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
@@ -533,7 +541,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;

- memslot = &kvm->memslots[mem->slot];
+ memslot = &kvm->memslots->memslots[mem->slot];
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;

@@ -554,7 +562,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *s = &kvm->memslots[i];
+ struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

if (s == memslot || !s->npages)
continue;
@@ -656,8 +664,8 @@ skip_lpage:
kvm_arch_flush_shadow(kvm);

spin_lock(&kvm->mmu_lock);
- if (mem->slot >= kvm->nmemslots)
- kvm->nmemslots = mem->slot + 1;
+ if (mem->slot >= kvm->memslots->nmemslots)
+ kvm->memslots->nmemslots = mem->slot + 1;

*memslot = new;
spin_unlock(&kvm->mmu_lock);
@@ -727,7 +735,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;

- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
@@ -781,9 +789,10 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
int i;
+ struct kvm_memslots *slots = kvm->memslots;

- for (i = 0; i < kvm->nmemslots; ++i) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ for (i = 0; i < slots->nmemslots; ++i) {
+ struct kvm_memory_slot *memslot = &slots->memslots[i];

if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages)
@@ -802,10 +811,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
+ struct kvm_memslots *slots = kvm->memslots;

gfn = unalias_gfn(kvm, gfn);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ struct kvm_memory_slot *memslot = &slots->memslots[i];

if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages)
--
1.6.5.3
Avi Kivity
2010-02-10 17:23:00 UTC
Permalink
From: Marcelo Tosatti <***@redhat.com>

Use two steps for memslot deletion: mark the slot invalid (which stops
instantiation of new shadow pages for that slot, but allows destruction),
then instantiate the new empty slot.

Also simplifies kvm_handle_hva locking.
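
For orientation, the deletion path introduced below boils down to the following
two-phase update (a condensed sketch of the patch's flow, not the literal code;
allocation failures and locking details are trimmed):

    /* Phase 1: publish a copy with the slot marked invalid, then wait out
     * all current SRCU readers before tearing anything down. */
    slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
    memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
    slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

    old_memslots = kvm->memslots;
    rcu_assign_pointer(kvm->memslots, slots);
    synchronize_srcu_expedited(&kvm->srcu);  /* no reader still sees the old array */
    kvm_arch_flush_shadow(kvm);              /* existing shadow pages for the slot go away */
    kfree(old_memslots);

    /* Phase 2: build a second copy that contains the final (empty) slot and
     * publish it the same way; only then free the slot's rmap/dirty bitmap. */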

Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/ia64/kvm/kvm-ia64.c | 4 +-
arch/x86/kvm/mmu.c | 28 +++++-----
arch/x86/kvm/vmx.c | 6 ++-
include/linux/kvm.h | 2 +-
include/linux/kvm_host.h | 7 +--
virt/kvm/assigned-dev.c | 8 +-
virt/kvm/iommu.c | 4 +-
virt/kvm/kvm_main.c | 141 ++++++++++++++++++++++++++++++++++------------
8 files changed, 136 insertions(+), 64 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 0757c70..b2e4d16 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1382,7 +1382,7 @@ static void kvm_release_vm_pages(struct kvm *kvm)
int i, j;
unsigned long base_gfn;

- slots = kvm->memslots;
+ slots = rcu_dereference(kvm->memslots);
for (i = 0; i < slots->nmemslots; i++) {
memslot = &slots->memslots[i];
base_gfn = memslot->base_gfn;
@@ -1837,6 +1837,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot;
int is_dirty = 0;

+ down_write(&kvm->slots_lock);
spin_lock(&kvm->arch.dirty_log_lock);

r = kvm_ia64_sync_dirty_log(kvm, log);
@@ -1856,6 +1857,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
}
r = 0;
out:
+ up_write(&kvm->slots_lock);
spin_unlock(&kvm->arch.dirty_log_lock);
return r;
}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 81f84d3..f8bf42a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -29,6 +29,7 @@
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
+#include <linux/srcu.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
@@ -807,21 +808,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
{
int i, j;
int retval = 0;
- struct kvm_memslots *slots = kvm->memslots;
+ struct kvm_memslots *slots;
+
+ slots = rcu_dereference(kvm->memslots);

- /*
- * If mmap_sem isn't taken, we can look the memslots with only
- * the mmu_lock by skipping over the slots with userspace_addr == 0.
- */
for (i = 0; i < slots->nmemslots; i++) {
struct kvm_memory_slot *memslot = &slots->memslots[i];
unsigned long start = memslot->userspace_addr;
unsigned long end;

- /* mmu_lock protects userspace_addr */
- if (!start)
- continue;
-
end = start + (memslot->npages << PAGE_SHIFT);
if (hva >= start && hva < end) {
gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
@@ -1617,7 +1612,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
- int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
+ int slot = memslot_id(kvm, gfn);
struct kvm_mmu_page *sp = page_header(__pa(pte));

__set_bit(slot, sp->slot_bitmap);
@@ -3021,9 +3016,11 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
int i;
unsigned int nr_mmu_pages;
unsigned int nr_pages = 0;
+ struct kvm_memslots *slots;

- for (i = 0; i < kvm->memslots->nmemslots; i++)
- nr_pages += kvm->memslots->memslots[i].npages;
+ slots = rcu_dereference(kvm->memslots);
+ for (i = 0; i < slots->nmemslots; i++)
+ nr_pages += slots->memslots[i].npages;

nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
nr_mmu_pages = max(nr_mmu_pages,
@@ -3293,10 +3290,14 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
static int count_rmaps(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_memslots *slots;
int nmaps = 0;
- int i, j, k;
+ int i, j, k, idx;

+ idx = srcu_read_lock(&kvm->srcu);
+ slots = rcu_dereference(kvm->memslots);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *m = &vcpu->kvm->memslots->memslots[i];
+ struct kvm_memory_slot *m = &slots->memslots[i];
struct kvm_rmap_desc *d;

for (j = 0; j < m->npages; ++j) {
@@ -3319,6 +3318,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
}
}
}
+ srcu_read_unlock(&kvm->srcu, idx);
return nmaps;
}

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1869879..f1cae7d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1503,7 +1503,11 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
static gva_t rmode_tss_base(struct kvm *kvm)
{
if (!kvm->arch.tss_addr) {
- gfn_t base_gfn = kvm->memslots->memslots[0].base_gfn +
+ struct kvm_memslots *slots;
+ gfn_t base_gfn;
+
+ slots = rcu_dereference(kvm->memslots);
+ base_gfn = slots->memslots[0].base_gfn +
+ slots->memslots[0].npages - 3;
return base_gfn << PAGE_SHIFT;
}
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index a24de0b..f2feef6 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -103,7 +103,7 @@ struct kvm_userspace_memory_region {

/* for kvm_memory_region::flags */
#define KVM_MEM_LOG_DIRTY_PAGES 1UL
-
+#define KVM_MEMSLOT_INVALID (1UL << 1)

/* for KVM_IRQ_LINE */
struct kvm_irq_level {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9af2403..93bd307 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -162,6 +162,7 @@ struct kvm {
struct rw_semaphore slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots *memslots;
+ struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
u32 bsp_vcpu_id;
struct kvm_vcpu *bsp_vcpu;
@@ -275,6 +276,7 @@ void kvm_set_page_accessed(struct page *page);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn);
+int memslot_id(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
@@ -490,11 +492,6 @@ static inline void kvm_guest_exit(void)
current->flags &= ~PF_VCPU;
}

-static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
- return slot - kvm->memslots->memslots;
-}
-
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
return (gpa_t)gfn << PAGE_SHIFT;
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index f73de63..f51e684 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -504,12 +504,12 @@ out:
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
struct kvm_assigned_pci_dev *assigned_dev)
{
- int r = 0;
+ int r = 0, idx;
struct kvm_assigned_dev_kernel *match;
struct pci_dev *dev;

mutex_lock(&kvm->lock);
- down_read(&kvm->slots_lock);
+ idx = srcu_read_lock(&kvm->srcu);

match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
assigned_dev->assigned_dev_id);
@@ -573,7 +573,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
}

out:
- up_read(&kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
return r;
out_list_del:
@@ -585,7 +585,7 @@ out_put:
pci_dev_put(dev);
out_free:
kfree(match);
- up_read(&kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
return r;
}
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index cf567d8..65a5143 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -78,7 +78,7 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
int i, r = 0;
struct kvm_memslots *slots;

- slots = kvm->memslots;
+ slots = rcu_dereference(kvm->memslots);

for (i = 0; i < slots->nmemslots; i++) {
r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
@@ -214,7 +214,7 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
int i;
struct kvm_memslots *slots;

- slots = kvm->memslots;
+ slots = rcu_dereference(kvm->memslots);

for (i = 0; i < slots->nmemslots; i++) {
kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 87d296d..2bb24a8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -44,6 +44,7 @@
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
+#include <linux/srcu.h>

#include <asm/processor.h>
#include <asm/io.h>
@@ -213,7 +214,7 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int need_tlb_flush;
+ int need_tlb_flush, idx;

/*
* When ->invalidate_page runs, the linux pte has been zapped
@@ -233,10 +234,12 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
* pte after kvm_unmap_hva returned, without noticing the page
* is going to be freed.
*/
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
need_tlb_flush = kvm_unmap_hva(kvm, address);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);

/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
@@ -250,11 +253,14 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
pte_t pte)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int idx;

+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
kvm_set_spte_hva(kvm, address, pte);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
@@ -263,8 +269,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int need_tlb_flush = 0;
+ int need_tlb_flush = 0, idx;

+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
* The count increase must become visible at unlock time as no
@@ -275,6 +282,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
for (; start < end; start += PAGE_SIZE)
need_tlb_flush |= kvm_unmap_hva(kvm, start);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);

/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
@@ -312,11 +320,13 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int young;
+ int young, idx;

+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
young = kvm_age_hva(kvm, address);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);

if (young)
kvm_flush_remote_tlbs(kvm);
@@ -379,11 +389,15 @@ static struct kvm *kvm_create_vm(void)
kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!kvm->memslots)
goto out_err;
+ if (init_srcu_struct(&kvm->srcu))
+ goto out_err;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
+ if (!page) {
+ cleanup_srcu_struct(&kvm->srcu);
goto out_err;
+ }

kvm->coalesced_mmio_ring =
(struct kvm_coalesced_mmio_ring *)page_address(page);
@@ -391,6 +405,7 @@ static struct kvm *kvm_create_vm(void)

r = kvm_init_mmu_notifier(kvm);
if (r) {
+ cleanup_srcu_struct(&kvm->srcu);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
put_page(page);
#endif
@@ -480,6 +495,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
#else
kvm_arch_flush_shadow(kvm);
#endif
+ cleanup_srcu_struct(&kvm->srcu);
kvm_arch_destroy_vm(kvm);
hardware_disable_all();
mmdrop(mm);
@@ -521,12 +537,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- int r;
+ int r, flush_shadow = 0;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
+ struct kvm_memslots *slots, *old_memslots;

r = -EINVAL;
/* General sanity checks */
@@ -588,15 +605,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
memset(new.rmap, 0, npages * sizeof(*new.rmap));

new.user_alloc = user_alloc;
- /*
- * hva_to_rmmap() serialzies with the mmu_lock and to be
- * safe it has to ignore memslots with !user_alloc &&
- * !userspace_addr.
- */
- if (user_alloc)
- new.userspace_addr = mem->userspace_addr;
- else
- new.userspace_addr = 0;
+ new.userspace_addr = mem->userspace_addr;
}
if (!npages)
goto skip_lpage;
@@ -651,8 +660,9 @@ skip_lpage:
if (!new.dirty_bitmap)
goto out_free;
memset(new.dirty_bitmap, 0, dirty_bytes);
+ /* destroy any largepage mappings for dirty tracking */
if (old.npages)
- kvm_arch_flush_shadow(kvm);
+ flush_shadow = 1;
}
#else /* not defined CONFIG_S390 */
new.user_alloc = user_alloc;
@@ -660,34 +670,72 @@ skip_lpage:
new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

- if (!npages)
+ if (!npages) {
+ r = -ENOMEM;
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+ memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+ if (mem->slot >= slots->nmemslots)
+ slots->nmemslots = mem->slot + 1;
+ slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
+
+ old_memslots = kvm->memslots;
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+ /* From this point no new shadow pages pointing to a deleted
+ * memslot will be created.
+ *
+ * validation of sp->gfn happens in:
+ * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
+ * - kvm_is_visible_gfn (mmu_check_roots)
+ */
kvm_arch_flush_shadow(kvm);
+ kfree(old_memslots);
+ }

r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
if (r)
goto out_free;

- spin_lock(&kvm->mmu_lock);
- if (mem->slot >= kvm->memslots->nmemslots)
- kvm->memslots->nmemslots = mem->slot + 1;
+#ifdef CONFIG_DMAR
+ /* map the pages in iommu page table */
+ if (npages) {
+ r = kvm_iommu_map_pages(kvm, &new);
+ if (r)
+ goto out_free;
+ }
+#endif

- *memslot = new;
- spin_unlock(&kvm->mmu_lock);
+ r = -ENOMEM;
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+ memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+ if (mem->slot >= slots->nmemslots)
+ slots->nmemslots = mem->slot + 1;
+
+ /* actual memory is freed via old in kvm_free_physmem_slot below */
+ if (!npages) {
+ new.rmap = NULL;
+ new.dirty_bitmap = NULL;
+ for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
+ new.lpage_info[i] = NULL;
+ }
+
+ slots->memslots[mem->slot] = new;
+ old_memslots = kvm->memslots;
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);

kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

- kvm_free_physmem_slot(&old, npages ? &new : NULL);
- /* Slot deletion case: we have to update the current slot */
- spin_lock(&kvm->mmu_lock);
- if (!npages)
- *memslot = old;
- spin_unlock(&kvm->mmu_lock);
-#ifdef CONFIG_DMAR
- /* map the pages in iommu page table */
- r = kvm_iommu_map_pages(kvm, memslot);
- if (r)
- goto out;
-#endif
+ kvm_free_physmem_slot(&old, &new);
+ kfree(old_memslots);
+
+ if (flush_shadow)
+ kvm_arch_flush_shadow(kvm);
+
return 0;

out_free:
@@ -787,7 +835,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
int i;
- struct kvm_memslots *slots = kvm->memslots;
+ struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

for (i = 0; i < slots->nmemslots; ++i) {
struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -809,12 +857,15 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
- struct kvm_memslots *slots = kvm->memslots;
+ struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

gfn = unalias_gfn(kvm, gfn);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *memslot = &slots->memslots[i];

+ if (memslot->flags & KVM_MEMSLOT_INVALID)
+ continue;
+
if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages)
return 1;
@@ -823,13 +874,31 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

+int memslot_id(struct kvm *kvm, gfn_t gfn)
+{
+ int i;
+ struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+ struct kvm_memory_slot *memslot = NULL;
+
+ gfn = unalias_gfn(kvm, gfn);
+ for (i = 0; i < slots->nmemslots; ++i) {
+ memslot = &slots->memslots[i];
+
+ if (gfn >= memslot->base_gfn
+ && gfn < memslot->base_gfn + memslot->npages)
+ break;
+ }
+
+ return memslot - slots->memslots;
+}
+
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *slot;

gfn = unalias_gfn(kvm, gfn);
slot = gfn_to_memslot_unaliased(kvm, gfn);
- if (!slot)
+ if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return bad_hva();
return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
--
1.6.5.3

Avi Kivity
2010-02-10 17:22:57 UTC
Permalink
From: Marcelo Tosatti <***@redhat.com>

Required for the SRCU conversion later in this series.
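
With the split, the generic code ends up calling the two hooks around the slot
update, roughly in this order (sketch of the call sequence only, error paths
elided):

    r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
    if (r)
            goto out_free;

    /* ... install the new slot into kvm->memslots ... */

    kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);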

Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/ia64/kvm/kvm-ia64.c | 16 ++++++++++---
arch/powerpc/kvm/powerpc.c | 18 ++++++++++++---
arch/s390/kvm/kvm-s390.c | 25 +++++++++++++-------
arch/x86/kvm/x86.c | 51 +++++++++++++++++++++++++-------------------
include/linux/kvm_host.h | 7 +++++-
virt/kvm/kvm_main.c | 12 ++++------
6 files changed, 82 insertions(+), 47 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 1ca1dbf..0757c70 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1578,15 +1578,15 @@ out:
return r;
}

-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
int user_alloc)
{
unsigned long i;
unsigned long pfn;
- int npages = mem->memory_size >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+ int npages = memslot->npages;
unsigned long base_gfn = memslot->base_gfn;

if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1610,6 +1610,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
return 0;
}

+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ return;
+}
+
void kvm_arch_flush_shadow(struct kvm *kvm)
{
kvm_flush_remote_tlbs(kvm);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index f06cf93..4633e78 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -165,14 +165,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}

-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old,
- int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc)
{
return 0;
}

+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ return;
+}
+
+
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f8bcaef..8bedd31 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -689,14 +689,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}

/* Section: memory related */
-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old,
- int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc)
{
- int i;
- struct kvm_vcpu *vcpu;
-
/* A few sanity checks. We can have exactly one memory slot which has
to start at guest virtual zero and which has to be located at a
page boundary in userland and which has to end at a page boundary.
@@ -719,14 +717,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (!user_alloc)
return -EINVAL;

+ return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
/* request update of sie control block for all available vcpus */
kvm_for_each_vcpu(i, vcpu, kvm) {
if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
continue;
kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
}
-
- return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6a3593f..6a795be 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5229,13 +5229,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kfree(kvm);
}

-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- int npages = mem->memory_size >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
+ int npages = memslot->npages;

/*To keep backward compatibility with older userspace,
*x86 needs to hanlde !user_alloc case.
@@ -5255,26 +5255,35 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (IS_ERR((void *)userspace_addr))
return PTR_ERR((void *)userspace_addr);

- /* set userspace_addr atomically for kvm_hva_to_rmapp */
- spin_lock(&kvm->mmu_lock);
memslot->userspace_addr = userspace_addr;
- spin_unlock(&kvm->mmu_lock);
- } else {
- if (!old.user_alloc && old.rmap) {
- int ret;
-
- down_write(&current->mm->mmap_sem);
- ret = do_munmap(current->mm, old.userspace_addr,
- old.npages * PAGE_SIZE);
- up_write(&current->mm->mmap_sem);
- if (ret < 0)
- printk(KERN_WARNING
- "kvm_vm_ioctl_set_memory_region: "
- "failed to munmap memory\n");
- }
}
}

+
+ return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+
+ int npages = mem->memory_size >> PAGE_SHIFT;
+
+ if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
+ int ret;
+
+ down_write(&current->mm->mmap_sem);
+ ret = do_munmap(current->mm, old.userspace_addr,
+ old.npages * PAGE_SIZE);
+ up_write(&current->mm->mmap_sem);
+ if (ret < 0)
+ printk(KERN_WARNING
+ "kvm_vm_ioctl_set_memory_region: "
+ "failed to munmap memory\n");
+ }
+
spin_lock(&kvm->mmu_lock);
if (!kvm->arch.n_requested_mmu_pages) {
unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
@@ -5283,8 +5292,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,

kvm_mmu_slot_remove_write_access(kvm, mem->slot);
spin_unlock(&kvm->mmu_lock);
-
- return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 782bfb1..3c44687 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -253,7 +253,12 @@ int kvm_set_memory_region(struct kvm *kvm,
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc);
-int kvm_arch_set_memory_region(struct kvm *kvm,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc);
+void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
int user_alloc);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 86dd8f3..c9f6cfe 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -663,6 +663,10 @@ skip_lpage:
if (!npages)
kvm_arch_flush_shadow(kvm);

+ r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
+ if (r)
+ goto out_free;
+
spin_lock(&kvm->mmu_lock);
if (mem->slot >= kvm->memslots->nmemslots)
kvm->memslots->nmemslots = mem->slot + 1;
@@ -670,13 +674,7 @@ skip_lpage:
*memslot = new;
spin_unlock(&kvm->mmu_lock);

- r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
- if (r) {
- spin_lock(&kvm->mmu_lock);
- *memslot = old;
- spin_unlock(&kvm->mmu_lock);
- goto out_free;
- }
+ kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

kvm_free_physmem_slot(&old, npages ? &new : NULL);
/* Slot deletion case: we have to update the current slot */
--
1.6.5.3
Avi Kivity
2010-02-10 17:22:53 UTC
Permalink
From: Alexander Graf <***@suse.de>

Progress on KVM for Embedded PowerPC has stalled, but for Book3S there is quite
a lot of work going on and still to do.

So in agreement with Hollis and Avi, we should switch maintainers for PowerPC.

Signed-off-by: Alexander Graf <***@suse.de>
Acked-by: Hollis Blanchard <***@penguinppc.org>
Signed-off-by: Avi Kivity <***@redhat.com>
---
MAINTAINERS | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 03f38c1..6c99c00 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3136,7 +3136,7 @@ F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c

KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
-M: Hollis Blanchard <***@us.ibm.com>
+M: Alexander Graf <***@suse.de>
L: kvm-***@vger.kernel.org
W: http://kvm.qumranet.com
S: Supported
--
1.6.5.3
Avi Kivity
2010-02-10 17:23:08 UTC
Permalink
When the guest acknowledges an interrupt, it sends an EOI message to the local
apic, which broadcasts it to the ioapic. To handle the EOI, we need to take
the ioapic mutex.

On large guests, this causes a lot of contention on this mutex. Since large
guests usually don't route interrupts via the ioapic (they use msi instead),
this is completely unnecessary.

Avoid taking the mutex by introducing a handled_vectors bitmap. Before taking
the mutex, check if the ioapic was actually responsible for the acked vector.
If not, we can return early.
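
The two sides pair up as the usual publish/consume pattern (condensed sketch;
handled_vectors is the 256-bit bitmap added below, tmp is a local
DECLARE_BITMAP of the same size):

    /* Writer: rebuild the bitmap from the redirection table, then publish. */
    memset(tmp, 0, sizeof(tmp));
    for (i = 0; i < IOAPIC_NUM_PINS; ++i)
            __set_bit(ioapic->redirtbl[i].fields.vector, tmp);
    memcpy(ioapic->handled_vectors, tmp, sizeof(tmp));
    smp_wmb();

    /* Reader (EOI path): test without the mutex, only fall through if set. */
    smp_rmb();
    if (!test_bit(vector, ioapic->handled_vectors))
            return;         /* the ioapic never programmed this vector */
    mutex_lock(&ioapic->lock);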

Signed-off-by: Avi Kivity <***@redhat.com>
Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
virt/kvm/ioapic.c | 19 +++++++++++++++++++
virt/kvm/ioapic.h | 1 +
2 files changed, 20 insertions(+), 0 deletions(-)

diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index f01392f..a2edfd1 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -100,6 +100,19 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
return injected;
}

+static void update_handled_vectors(struct kvm_ioapic *ioapic)
+{
+ DECLARE_BITMAP(handled_vectors, 256);
+ int i;
+
+ memset(handled_vectors, 0, sizeof(handled_vectors));
+ for (i = 0; i < IOAPIC_NUM_PINS; ++i)
+ __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
+ memcpy(ioapic->handled_vectors, handled_vectors,
+ sizeof(handled_vectors));
+ smp_wmb();
+}
+
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
unsigned index;
@@ -134,6 +147,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
e->bits |= (u32) val;
e->fields.remote_irr = 0;
}
+ update_handled_vectors(ioapic);
mask_after = e->fields.mask;
if (mask_before != mask_after)
kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
@@ -241,6 +255,9 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;

+ smp_rmb();
+ if (!test_bit(vector, ioapic->handled_vectors))
+ return;
mutex_lock(&ioapic->lock);
__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
mutex_unlock(&ioapic->lock);
@@ -352,6 +369,7 @@ void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
ioapic->ioregsel = 0;
ioapic->irr = 0;
ioapic->id = 0;
+ update_handled_vectors(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
@@ -401,6 +419,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)

mutex_lock(&ioapic->lock);
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
+ update_handled_vectors(ioapic);
mutex_unlock(&ioapic->lock);
return 0;
}
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 419c43b..a505ce9 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -46,6 +46,7 @@ struct kvm_ioapic {
struct kvm *kvm;
void (*ack_notifier)(void *opaque, int irq);
struct mutex lock;
+ DECLARE_BITMAP(handled_vectors, 256);
};

#ifdef DEBUG
--
1.6.5.3
Avi Kivity
2010-02-10 17:22:51 UTC
Permalink
From: Alexander Graf <***@suse.de>

We treated the DEC interrupt like an edge-triggered one. That is not true on
Book3S: the DEC keeps firing until mtdec is issued again, which is what clears
the interrupt line.

So let's implement this logic in KVM too. This patch moves the line clearing
from the firing of the interrupt to the mtdec emulation.

This makes PPC64 guests work without AGGRESSIVE_DEC defined.
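
In other words, the level-triggered behaviour is modelled by dequeueing on the
register write rather than after delivery. A sketch of the intended flow (the
full mtdec emulation also reprograms the decrementer timer):

    /* On mtdec emulation: writing DEC lowers the line ... */
    kvmppc_core_dequeue_dec(vcpu);
    /* ... and a negative value (POWER4+) raises it again. */
    if (vcpu->arch.dec & 0x80000000)
            kvmppc_core_queue_dec(vcpu);

    /* On delivery, the DEC bit is deliberately left pending, so the
     * interrupt keeps firing until the guest writes DEC again. */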

Signed-off-by: Alexander Graf <***@suse.de>
Acked-by: Hollis Blanchard <***@penguinppc.org>
Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/powerpc/include/asm/kvm_ppc.h | 1 +
arch/powerpc/kvm/book3s.c | 16 +++++++++++++++-
arch/powerpc/kvm/booke.c | 5 +++++
arch/powerpc/kvm/emulate.c | 3 +++
4 files changed, 24 insertions(+), 1 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 269ee46..abfd0c4 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -82,6 +82,7 @@ extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);

diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 241795b..fd3ad6c 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -151,6 +151,13 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
return prio;
}

+static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+ unsigned int vec)
+{
+ clear_bit(kvmppc_book3s_vec2irqprio(vec),
+ &vcpu->arch.pending_exceptions);
+}
+
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
vcpu->stat.queue_intr++;
@@ -178,6 +185,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
}

+void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
+{
+ kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
+}
+
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
@@ -275,7 +287,9 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
#endif
priority = __ffs(*pending);
while (priority <= (sizeof(unsigned int) * 8)) {
- if (kvmppc_book3s_irqprio_deliver(vcpu, priority)) {
+ if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
+ (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
+ /* DEC interrupts get cleared by mtdec */
clear_bit(priority, &vcpu->arch.pending_exceptions);
break;
}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 06f5a9e..d8b6342 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -97,6 +97,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

+void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
+{
+ clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
+}
+
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 4a9ac66..303457b 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -83,6 +83,9 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)

pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC64
+ /* mtdec lowers the interrupt line when positive. */
+ kvmppc_core_dequeue_dec(vcpu);
+
/* POWER4+ triggers a dec interrupt if the value is < 0 */
if (vcpu->arch.dec & 0x80000000) {
hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
--
1.6.5.3

Avi Kivity
2010-02-10 17:23:09 UTC
Permalink
From: Alexander Graf <***@suse.de>

When we're reloading bolted entries into the SLB, we check whether an
entry is in use and only slbmte it when it is.

Unfortunately, the check always goes to the skip label of the first entry,
resulting in an endless loop when it actually gets triggered.
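
The fix is in the branch target: the macro pasted a literal "1" into the label,
so every REBOLT_SLB_ENTRY(num) expansion branched to the first entry's skip
label, which sits before the later entries' checks. With "## num" each
expansion gets its own label. A C-level illustration of the token pasting
(entry_in_use() and rebolt() are made-up names, only the label construction
matters):

    #define REBOLT(num)                             \
            if (!entry_in_use(num))                 \
                    goto skip_##num;                \
            rebolt(num);                            \
    skip_##num: ;

    REBOLT(0)
    REBOLT(1)   /* now jumps forward to skip_1; with the old literal label it
                 * would jump back before its own check and spin forever */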

Signed-off-by: Alexander Graf <***@suse.de>
Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/powerpc/kvm/book3s_64_slb.S | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index ecd237a..8e44788 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -31,7 +31,7 @@
#define REBOLT_SLB_ENTRY(num) \
ld r10, SHADOW_SLB_ESID(num)(r11); \
cmpdi r10, 0; \
- beq slb_exit_skip_1; \
+ beq slb_exit_skip_ ## num; \
oris r10, r10, ***@h; \
ld r9, SHADOW_SLB_VSID(num)(r11); \
slbmte r9, r10; \
--
1.6.5.3
Avi Kivity
2010-02-10 17:23:07 UTC
Permalink
Some exit reasons missed their strings; fill out the table.
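
The table is compacted with a stringize-and-paste helper, so the constant and
its printed name can no longer drift apart. An illustrative expansion (the
macro is the one added below):

    #define _ER(x) { EXIT_REASON_##x, #x }

    /* _ER(CPUID) expands to { EXIT_REASON_CPUID, "CPUID" }, so adding a
     * missing reason is a one-line change. */
    static const struct trace_print_flags example_str[] = {
            _ER(CPUID),
            _ER(EPT_MISCONFIG),
            { -1, NULL }
    };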

Signed-off-by: Avi Kivity <***@redhat.com>
Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/x86/kvm/vmx.c | 58 +++++++++++++++++++++++++++++++++++-----------------
1 files changed, 39 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f04e2ff..f4486f4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3993,29 +3993,49 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
return ret;
}

+#define _ER(x) { EXIT_REASON_##x, #x }
+
static const struct trace_print_flags vmx_exit_reasons_str[] = {
- { EXIT_REASON_EXCEPTION_NMI, "exception" },
- { EXIT_REASON_EXTERNAL_INTERRUPT, "ext_irq" },
- { EXIT_REASON_TRIPLE_FAULT, "triple_fault" },
- { EXIT_REASON_NMI_WINDOW, "nmi_window" },
- { EXIT_REASON_IO_INSTRUCTION, "io_instruction" },
- { EXIT_REASON_CR_ACCESS, "cr_access" },
- { EXIT_REASON_DR_ACCESS, "dr_access" },
- { EXIT_REASON_CPUID, "cpuid" },
- { EXIT_REASON_MSR_READ, "rdmsr" },
- { EXIT_REASON_MSR_WRITE, "wrmsr" },
- { EXIT_REASON_PENDING_INTERRUPT, "interrupt_window" },
- { EXIT_REASON_HLT, "halt" },
- { EXIT_REASON_INVLPG, "invlpg" },
- { EXIT_REASON_VMCALL, "hypercall" },
- { EXIT_REASON_TPR_BELOW_THRESHOLD, "tpr_below_thres" },
- { EXIT_REASON_APIC_ACCESS, "apic_access" },
- { EXIT_REASON_WBINVD, "wbinvd" },
- { EXIT_REASON_TASK_SWITCH, "task_switch" },
- { EXIT_REASON_EPT_VIOLATION, "ept_violation" },
+ _ER(EXCEPTION_NMI),
+ _ER(EXTERNAL_INTERRUPT),
+ _ER(TRIPLE_FAULT),
+ _ER(PENDING_INTERRUPT),
+ _ER(NMI_WINDOW),
+ _ER(TASK_SWITCH),
+ _ER(CPUID),
+ _ER(HLT),
+ _ER(INVLPG),
+ _ER(RDPMC),
+ _ER(RDTSC),
+ _ER(VMCALL),
+ _ER(VMCLEAR),
+ _ER(VMLAUNCH),
+ _ER(VMPTRLD),
+ _ER(VMPTRST),
+ _ER(VMREAD),
+ _ER(VMRESUME),
+ _ER(VMWRITE),
+ _ER(VMOFF),
+ _ER(VMON),
+ _ER(CR_ACCESS),
+ _ER(DR_ACCESS),
+ _ER(IO_INSTRUCTION),
+ _ER(MSR_READ),
+ _ER(MSR_WRITE),
+ _ER(MWAIT_INSTRUCTION),
+ _ER(MONITOR_INSTRUCTION),
+ _ER(PAUSE_INSTRUCTION),
+ _ER(MCE_DURING_VMENTRY),
+ _ER(TPR_BELOW_THRESHOLD),
+ _ER(APIC_ACCESS),
+ _ER(EPT_VIOLATION),
+ _ER(EPT_MISCONFIG),
+ _ER(WBINVD),
{ -1, NULL }
};

+#undef _ER
+
static bool vmx_gb_page_enable(void)
{
return false;
--
1.6.5.3

Avi Kivity
2010-02-10 17:22:58 UTC
Permalink
From: Marcelo Tosatti <***@redhat.com>

Introduce gfn_to_pfn_memslot(), which takes a memslot pointer instead of
looking the slot up via kvm->memslots.

To be used by the SRCU conversion later.
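
From a caller's point of view the split looks like this (sketch; the point is
that the slot lookup and the hva-to-pfn translation become separable):

    slot = gfn_to_memslot(kvm, gfn);            /* look the slot up once */
    pfn  = gfn_to_pfn_memslot(kvm, slot, gfn);  /* no second walk of kvm->memslots */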

Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
include/linux/kvm_host.h | 2 ++
virt/kvm/kvm_main.c | 33 +++++++++++++++++++++++++--------
2 files changed, 27 insertions(+), 8 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3c44687..f1f78de 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -273,6 +273,8 @@ void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c9f6cfe..4e2321c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -835,21 +835,14 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

-pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
{
struct page *page[1];
- unsigned long addr;
int npages;
pfn_t pfn;

might_sleep();

- addr = gfn_to_hva(kvm, gfn);
- if (kvm_is_error_hva(addr)) {
- get_page(bad_page);
- return page_to_pfn(bad_page);
- }
-
npages = get_user_pages_fast(addr, 1, 1, page);

if (unlikely(npages != 1)) {
@@ -874,8 +867,32 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
return pfn;
}

+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+{
+ unsigned long addr;
+
+ addr = gfn_to_hva(kvm, gfn);
+ if (kvm_is_error_hva(addr)) {
+ get_page(bad_page);
+ return page_to_pfn(bad_page);
+ }
+
+ return hva_to_pfn(kvm, addr);
+}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

+static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
+}
+
+pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ unsigned long addr = gfn_to_hva_memslot(slot, gfn);
+ return hva_to_pfn(kvm, addr);
+}
+
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
pfn_t pfn;
--
1.6.5.3

Avi Kivity
2010-02-10 17:23:06 UTC
Permalink
With slots_lock converted to rcu, the entire kvm hotpath on modern processors
(with npt or ept) now scales beautifully. Increase the maximum vcpu count to
64 to reflect this.

Signed-off-by: Avi Kivity <***@redhat.com>
Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6c8c7c5..741b897 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -25,7 +25,7 @@
#include <asm/mtrr.h>
#include <asm/msr-index.h>

-#define KVM_MAX_VCPUS 16
+#define KVM_MAX_VCPUS 64
#define KVM_MEMORY_SLOTS 32
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
--
1.6.5.3
Avi Kivity
2010-02-10 17:23:01 UTC
Permalink
From: Marcelo Tosatti <***@redhat.com>

Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/x86/kvm/x86.c | 49 +++++++++++++++++++++++++++++++++++++++++--------
1 files changed, 41 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6a795be..2d7c040 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2416,27 +2416,60 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
- int r;
- int n;
+ int r, n, i;
struct kvm_memory_slot *memslot;
- int is_dirty = 0;
+ unsigned long is_dirty = 0;
+ unsigned long *dirty_bitmap = NULL;

down_write(&kvm->slots_lock);

- r = kvm_get_dirty_log(kvm, log, &is_dirty);
- if (r)
+ r = -EINVAL;
+ if (log->slot >= KVM_MEMORY_SLOTS)
+ goto out;
+
+ memslot = &kvm->memslots->memslots[log->slot];
+ r = -ENOENT;
+ if (!memslot->dirty_bitmap)
+ goto out;
+
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+
+ r = -ENOMEM;
+ dirty_bitmap = vmalloc(n);
+ if (!dirty_bitmap)
goto out;
+ memset(dirty_bitmap, 0, n);
+
+ for (i = 0; !is_dirty && i < n/sizeof(long); i++)
+ is_dirty = memslot->dirty_bitmap[i];

/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
+ struct kvm_memslots *slots, *old_slots;
+
spin_lock(&kvm->mmu_lock);
kvm_mmu_slot_remove_write_access(kvm, log->slot);
spin_unlock(&kvm->mmu_lock);
- memslot = &kvm->memslots->memslots[log->slot];
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
- memset(memslot->dirty_bitmap, 0, n);
+
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+
+ memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+ slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+
+ old_slots = kvm->memslots;
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+ dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
+ kfree(old_slots);
}
+
r = 0;
+ if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
+ r = -EFAULT;
+out_free:
+ vfree(dirty_bitmap);
out:
up_write(&kvm->slots_lock);
return r;
--
1.6.5.3
Avi Kivity
2010-02-10 17:22:52 UTC
Permalink
From: Alexander Graf <***@suse.de>

Because we now emulate the DEC interrupt according to its real-life behavior,
there's no need to keep the AGGRESSIVE_DEC hack around.

Let's just remove it.

Signed-off-by: Alexander Graf <***@suse.de>
Acked-by: Hollis Blanchard <***@penguinppc.org>
Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/powerpc/kvm/book3s.c | 16 +---------------
1 files changed, 1 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index fd3ad6c..803505d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -34,12 +34,6 @@
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */

-/* Without AGGRESSIVE_DEC we only fire off a DEC interrupt when DEC turns 0.
- * When set, we retrigger a DEC interrupt after that if DEC <= 0.
- * PPC32 Linux runs faster without AGGRESSIVE_DEC, PPC64 Linux requires it. */
-
-/* #define AGGRESSIVE_DEC */
-
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exits", VCPU_STAT(sum_exits) },
{ "mmio", VCPU_STAT(mmio_exits) },
@@ -81,7 +75,7 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
}

-#if defined(AGGRESSIVE_DEC) || defined(EXIT_DEBUG)
+#if defined(EXIT_DEBUG)
static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
{
u64 jd = mftb() - vcpu->arch.dec_jiffies;
@@ -273,14 +267,6 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned int priority;

- /* XXX be more clever here - no need to mftb() on every entry */
- /* Issue DEC again if it's still active */
-#ifdef AGGRESSIVE_DEC
- if (vcpu->arch.msr & MSR_EE)
- if (kvmppc_get_dec(vcpu) & 0x80000000)
- kvmppc_core_queue_dec(vcpu);
-#endif
-
#ifdef EXIT_DEBUG
if (vcpu->arch.pending_exceptions)
printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
--
1.6.5.3

Avi Kivity
2010-02-10 17:22:49 UTC
Permalink
- add destructor function
- move related allocation into constructor
- add stubs for !CONFIG_KVM_MMIO

Signed-off-by: Avi Kivity <***@redhat.com>
---
virt/kvm/coalesced_mmio.c | 25 +++++++++++++++++++++++--
virt/kvm/coalesced_mmio.h | 10 ++++++++++
virt/kvm/kvm_main.c | 7 +------
3 files changed, 34 insertions(+), 8 deletions(-)

diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 04d69cd..d68e6c6 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -92,11 +92,19 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
struct kvm_coalesced_mmio_dev *dev;
+ struct page *page;
int ret;

+ ret = -ENOMEM;
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ goto out_err;
+ kvm->coalesced_mmio_ring = page_address(page);
+
+ ret = -ENOMEM;
dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
if (!dev)
- return -ENOMEM;
+ goto out_free_page;
spin_lock_init(&dev->lock);
kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
dev->kvm = kvm;
@@ -104,11 +112,24 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)

ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev);
if (ret < 0)
- kfree(dev);
+ goto out_free_dev;
+
+ return ret;

+out_free_dev:
+ kfree(dev);
+out_free_page:
+ __free_page(page);
+out_err:
return ret;
}

+void kvm_coalesced_mmio_free(struct kvm *kvm)
+{
+ if (kvm->coalesced_mmio_ring)
+ free_page((unsigned long)kvm->coalesced_mmio_ring);
+}
+
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone)
{
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index e703346..8a5959e 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -10,6 +10,8 @@
*
*/

+#ifdef CONFIG_KVM_MMIO
+
#define KVM_COALESCED_MMIO_ZONE_MAX 100

struct kvm_coalesced_mmio_dev {
@@ -21,9 +23,17 @@ struct kvm_coalesced_mmio_dev {
};

int kvm_coalesced_mmio_init(struct kvm *kvm);
+void kvm_coalesced_mmio_free(struct kvm *kvm);
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);

+#else
+
+static inline int kvm_coalesced_mmio_init(struct kvm *kvm) { return 0; }
+static inline void kvm_coalesced_mmio_free(struct kvm *kvm) { }
+
+#endif
+
#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c10d117..bc23b8e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -51,9 +51,7 @@
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
-#endif

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
@@ -468,10 +466,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
kvm_free_irq_routing(kvm);
kvm_io_bus_destroy(&kvm->pio_bus);
kvm_io_bus_destroy(&kvm->mmio_bus);
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- if (kvm->coalesced_mmio_ring != NULL)
- free_page((unsigned long)kvm->coalesced_mmio_ring);
-#endif
+ kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
--
1.6.5.3
Avi Kivity
2010-02-10 17:22:43 UTC
Permalink
From: Sheng Yang <***@linux.intel.com>

Before this is enabled, executing "rdtscp" in the guest results in #UD.
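
Condensed from the cpuid_update change below, the guest-visible rule is: expose
RDTSCP only when the host VMX secondary controls support it and the guest's
CPUID advertises it; otherwise clear the execution control so the guest keeps
getting #UD. A sketch (error handling and the control read-back check trimmed):

    vmx->rdtscp_enabled = false;
    if (cpu_has_vmx_rdtscp()) {
            u32 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
            struct kvm_cpuid_entry2 *best =
                    kvm_find_cpuid_entry(vcpu, 0x80000001, 0);

            if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
                    vmx->rdtscp_enabled = true;     /* also exposes MSR_TSC_AUX */
            else
                    vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
                                 exec_control & ~SECONDARY_EXEC_RDTSCP);
    }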

Signed-off-by: Sheng Yang <***@linux.intel.com>
Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/include/asm/vmx.h | 1 +
arch/x86/kvm/svm.c | 7 ++++
arch/x86/kvm/vmx.c | 60 +++++++++++++++++++++++++++++++++++++--
arch/x86/kvm/x86.c | 3 +-
5 files changed, 68 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7ff0ea3..fe4df46 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -521,6 +521,7 @@ struct kvm_x86_ops {
int (*get_tdp_level)(void);
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
bool (*gb_page_enable)(void);
+ bool (*rdtscp_supported)(void);

const struct trace_print_flags *exit_reasons_str;
};
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 8f6b011..713ed9a 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -53,6 +53,7 @@
*/
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
+#define SECONDARY_EXEC_RDTSCP 0x00000008
#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 41777e6..7f4e225 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2914,6 +2914,11 @@ static bool svm_gb_page_enable(void)
return true;
}

+static bool svm_rdtscp_supported(void)
+{
+ return false;
+}
+
static struct kvm_x86_ops svm_x86_ops = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -2982,6 +2987,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.gb_page_enable = svm_gb_page_enable,

.cpuid_update = svm_cpuid_update,
+
+ .rdtscp_supported = svm_rdtscp_supported,
};

static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 75e8931..74a66f0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -151,6 +151,8 @@ struct vcpu_vmx {
ktime_t entry_time;
s64 vnmi_blocked_time;
u32 exit_reason;
+
+ bool rdtscp_enabled;
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -225,7 +227,7 @@ static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
- MSR_EFER, MSR_K6_STAR,
+ MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

@@ -362,6 +364,12 @@ static inline int cpu_has_vmx_vpid(void)
SECONDARY_EXEC_ENABLE_VPID;
}

+static inline int cpu_has_vmx_rdtscp(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_RDTSCP;
+}
+
static inline int cpu_has_virtual_nmis(void)
{
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
@@ -893,6 +901,11 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
}

+static bool vmx_rdtscp_supported(void)
+{
+ return cpu_has_vmx_rdtscp();
+}
+
/*
* Swap MSR entry in host/guest MSR entry array.
*/
@@ -928,6 +941,9 @@ static void setup_msrs(struct vcpu_vmx *vmx)
index = __find_msr_index(vmx, MSR_CSTAR);
if (index >= 0)
move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_TSC_AUX);
+ if (index >= 0 && vmx->rdtscp_enabled)
+ move_msr_up(vmx, index, save_nmsrs++);
/*
* MSR_K6_STAR is only needed on long mode guests, and only
* if efer.sce is enabled.
@@ -1017,6 +1033,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
case MSR_IA32_SYSENTER_ESP:
data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
+ case MSR_TSC_AUX:
+ if (!to_vmx(vcpu)->rdtscp_enabled)
+ return 1;
+ /* Otherwise falls through */
default:
vmx_load_host_state(to_vmx(vcpu));
msr = find_msr_entry(to_vmx(vcpu), msr_index);
@@ -1080,7 +1100,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
vcpu->arch.pat = data;
break;
}
- /* Otherwise falls through to kvm_set_msr_common */
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
+ break;
+ case MSR_TSC_AUX:
+ if (!vmx->rdtscp_enabled)
+ return 1;
+ /* Check reserved bit, higher 32 bits should be zero */
+ if ((data >> 32) != 0)
+ return 1;
+ /* Otherwise falls through */
default:
msr = find_msr_entry(vmx, msr_index);
if (msr) {
@@ -1260,7 +1288,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_UNRESTRICTED_GUEST |
- SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING |
+ SECONDARY_EXEC_RDTSCP;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -3988,8 +4017,31 @@ static bool vmx_gb_page_enable(void)
return false;
}

+static inline u32 bit(int bitno)
+{
+ return 1 << (bitno & 31);
+}
+
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpuid_entry2 *best;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 exec_control;
+
+ vmx->rdtscp_enabled = false;
+ if (vmx_rdtscp_supported()) {
+ exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ if (exec_control & SECONDARY_EXEC_RDTSCP) {
+ best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+ if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
+ vmx->rdtscp_enabled = true;
+ else {
+ exec_control &= ~SECONDARY_EXEC_RDTSCP;
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ exec_control);
+ }
+ }
+ }
}

static struct kvm_x86_ops vmx_x86_ops = {
@@ -4058,6 +4110,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.gb_page_enable = vmx_gb_page_enable,

.cpuid_update = vmx_cpuid_update,
+
+ .rdtscp_supported = vmx_rdtscp_supported,
};

static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 153f4a1..1bdcf26 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1647,6 +1647,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
#else
unsigned f_lm = 0;
#endif
+ unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;

/* cpuid 1.edx */
const u32 kvm_supported_word0_x86_features =
@@ -1666,7 +1667,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
F(PAT) | F(PSE36) | 0 /* Reserved */ |
f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
- F(FXSR) | F(FXSR_OPT) | f_gbpages | 0 /* RDTSCP */ |
+ F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
/* cpuid 1.ecx */
const u32 kvm_supported_word4_x86_features =
--
1.6.5.3
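
As an illustration of what exposing RDTSCP enables (a guest-side sketch, not
part of the patch itself; the helper name is made up), the guest can read the
TSC and the per-CPU TSC_AUX value in a single instruction:

/* RDTSCP returns the TSC in EDX:EAX and IA32_TSC_AUX in ECX. */
static inline unsigned long long guest_rdtscp(unsigned int *aux)
{
	unsigned int lo, hi;

	asm volatile("rdtscp" : "=a" (lo), "=d" (hi), "=c" (*aux));
	return ((unsigned long long)hi << 32) | lo;
}

Linux, for example, stores the CPU and node number in TSC_AUX for the vgetcpu
vsyscall, which is why the guest's MSR_TSC_AUX value has to be switched around
guest entry.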

Avi Kivity
2010-02-10 17:22:47 UTC
Permalink
Signed-off-by: Avi Kivity <***@redhat.com>
---
virt/kvm/kvm_main.c | 20 +++++++++++++++-----
1 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 92f5c98..c10d117 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -341,6 +341,20 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
.change_pte = kvm_mmu_notifier_change_pte,
.release = kvm_mmu_notifier_release,
};
+
+static int kvm_init_mmu_notifier(struct kvm *kvm)
+{
+ kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
+ return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+}
+
+#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
+
+static int kvm_init_mmu_notifier(struct kvm *kvm)
+{
+ return 0;
+}
+
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
@@ -373,11 +387,7 @@ static struct kvm *kvm_create_vm(void)
(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
- kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
- r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
-#endif
-
+ r = kvm_init_mmu_notifier(kvm);
if (r) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
put_page(page);
--
1.6.5.3

Avi Kivity
2010-02-10 17:22:50 UTC
Permalink
From: Alexander Graf <***@suse.de>

We're using a switch table to find the irqprio that belongs to a specific
interrupt vector. This table is part of the interrupt injection logic.

Since we'll add a new function to stop interrupts, let's move this table
out of the injection logic into a separate function.

Signed-off-by: Alexander Graf <***@suse.de>
Acked-by: Hollis Blanchard <***@penguinppc.org>
Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/powerpc/kvm/book3s.c | 13 ++++++++++---
1 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e294bd..241795b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -125,11 +125,10 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
vcpu->arch.mmu.reset_msr(vcpu);
}

-void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
unsigned int prio;

- vcpu->stat.queue_intr++;
switch (vec) {
case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
@@ -149,7 +148,15 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
default: prio = BOOK3S_IRQPRIO_MAX; break;
}

- set_bit(prio, &vcpu->arch.pending_exceptions);
+ return prio;
+}
+
+void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+{
+ vcpu->stat.queue_intr++;
+
+ set_bit(kvmppc_book3s_vec2irqprio(vec),
+ &vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
--
1.6.5.3

Avi Kivity
2010-02-10 17:22:56 UTC
Permalink
From: Marcelo Tosatti <***@redhat.com>

Have a pointer to an allocated region inside x86's kvm_arch.

Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 8 ++++++--
arch/x86/kvm/x86.c | 21 ++++++++++++++++-----
2 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fe4df46..7cdcb3d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -370,9 +370,13 @@ struct kvm_mem_alias {
gfn_t target_gfn;
};

-struct kvm_arch{
- int naliases;
+struct kvm_mem_aliases {
struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
+ int naliases;
+};
+
+struct kvm_arch {
+ struct kvm_mem_aliases *aliases;

unsigned int n_free_mmu_pages;
unsigned int n_requested_mmu_pages;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f8e2712..6a3593f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2228,9 +2228,10 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
struct kvm_mem_alias *alias;
+ struct kvm_mem_aliases *aliases = kvm->arch.aliases;

- for (i = 0; i < kvm->arch.naliases; ++i) {
- alias = &kvm->arch.aliases[i];
+ for (i = 0; i < aliases->naliases; ++i) {
+ alias = &aliases->aliases[i];
if (gfn >= alias->base_gfn
&& gfn < alias->base_gfn + alias->npages)
return alias->target_gfn + gfn - alias->base_gfn;
@@ -2248,6 +2249,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
{
int r, n;
struct kvm_mem_alias *p;
+ struct kvm_mem_aliases *aliases;

r = -EINVAL;
/* General sanity checks */
@@ -2267,15 +2269,17 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
down_write(&kvm->slots_lock);
spin_lock(&kvm->mmu_lock);

- p = &kvm->arch.aliases[alias->slot];
+ aliases = kvm->arch.aliases;
+
+ p = &aliases->aliases[alias->slot];
p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
p->npages = alias->memory_size >> PAGE_SHIFT;
p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

for (n = KVM_ALIAS_SLOTS; n > 0; --n)
- if (kvm->arch.aliases[n - 1].npages)
+ if (aliases->aliases[n - 1].npages)
break;
- kvm->arch.naliases = n;
+ aliases->naliases = n;

spin_unlock(&kvm->mmu_lock);
kvm_mmu_zap_all(kvm);
@@ -5159,6 +5163,12 @@ struct kvm *kvm_arch_create_vm(void)
if (!kvm)
return ERR_PTR(-ENOMEM);

+ kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+ if (!kvm->arch.aliases) {
+ kfree(kvm);
+ return ERR_PTR(-ENOMEM);
+ }
+
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

@@ -5215,6 +5225,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
put_page(kvm->arch.apic_access_page);
if (kvm->arch.ept_identity_pagetable)
put_page(kvm->arch.ept_identity_pagetable);
+ kfree(kvm->arch.aliases);
kfree(kvm);
}
--
1.6.5.3

Avi Kivity
2010-02-10 17:22:54 UTC
Permalink
From: Wu Fengguang <***@intel.com>

Signed-off-by: Wu Fengguang <***@intel.com>
Signed-off-by: Marcelo Tosatti <***@redhat.com>
---
Documentation/kvm/api.txt | 12 ++++++------
1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index 2811e45..c6416a3 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -23,12 +23,12 @@ of a virtual machine. The ioctls belong to three classes
Only run vcpu ioctls from the same thread that was used to create the
vcpu.

-2. File descritpors
+2. File descriptors

The kvm API is centered around file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
-handle will create a VM file descripror which can be used to issue VM
+handle will create a VM file descriptor which can be used to issue VM
ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
and return a file descriptor pointing to it. Finally, ioctls on a vcpu
fd can be used to control the vcpu, including the important task of
@@ -643,7 +643,7 @@ Type: vm ioctl
Parameters: struct kvm_clock_data (in)
Returns: 0 on success, -1 on error

-Sets the current timestamp of kvmclock to the valued specific in its parameter.
+Sets the current timestamp of kvmclock to the value specified in its parameter.
In conjunction with KVM_GET_CLOCK, it is used to ensure monotonicity on scenarios
such as migration.

@@ -795,11 +795,11 @@ Unused.
__u64 data_offset; /* relative to kvm_run start */
} io;

-If exit_reason is KVM_EXIT_IO_IN or KVM_EXIT_IO_OUT, then the vcpu has
+If exit_reason is KVM_EXIT_IO, then the vcpu has
executed a port I/O instruction which could not be satisfied by kvm.
data_offset describes where the data is located (KVM_EXIT_IO_OUT) or
where kvm expects application code to place the data for the next
-KVM_RUN invocation (KVM_EXIT_IO_IN). Data format is a patcked array.
+KVM_RUN invocation (KVM_EXIT_IO_IN). Data format is a packed array.

struct {
struct kvm_debug_exit_arch arch;
@@ -815,7 +815,7 @@ Unused.
__u8 is_write;
} mmio;

-If exit_reason is KVM_EXIT_MMIO or KVM_EXIT_IO_OUT, then the vcpu has
+If exit_reason is KVM_EXIT_MMIO, then the vcpu has
executed a memory-mapped I/O instruction which could not be satisfied
by kvm. The 'data' member contains the written data if 'is_write' is
true, and should be filled by application code otherwise.
--
1.6.5.3

Avi Kivity
2010-02-10 17:22:45 UTC
Permalink
They aren't related.

Signed-off-by: Avi Kivity <***@redhat.com>
---
virt/kvm/kvm_main.c | 18 +++++++-----------
1 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a944be3..92f5c98 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -374,17 +374,16 @@ static struct kvm *kvm_create_vm(void)
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
- {
- kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
- r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
- if (r) {
+ kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
+ r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+#endif
+
+ if (r) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- put_page(page);
+ put_page(page);
#endif
- goto out_err;
- }
+ goto out_err;
}
-#endif

kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
@@ -406,11 +405,8 @@ static struct kvm *kvm_create_vm(void)
out:
return kvm;

-#if defined(KVM_COALESCED_MMIO_PAGE_OFFSET) || \
- (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
out_err:
hardware_disable_all();
-#endif
out_err_nodisable:
kfree(kvm);
return ERR_PTR(r);
--
1.6.5.3

Avi Kivity
2010-02-10 17:22:48 UTC
Permalink
s390 doesn't have mmio; this will simplify ifdefing it out.

Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/ia64/kvm/Kconfig | 1 +
arch/powerpc/kvm/Kconfig | 1 +
arch/x86/kvm/Kconfig | 1 +
virt/kvm/Kconfig | 3 +++
4 files changed, 6 insertions(+), 0 deletions(-)

diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index ef3e7be..bf82e47 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -26,6 +26,7 @@ config KVM
select ANON_INODES
select HAVE_KVM_IRQCHIP
select KVM_APIC_ARCHITECTURE
+ select KVM_MMIO
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 6fb6e8a..be28968 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
bool
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select KVM_MMIO

config KVM_BOOK3S_64_HANDLER
bool
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 4cd4983..0687111 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -29,6 +29,7 @@ config KVM
select HAVE_KVM_EVENTFD
select KVM_APIC_ARCHITECTURE
select USER_RETURN_NOTIFIER
+ select KVM_MMIO
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index daece36..7f1178f 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -12,3 +12,6 @@ config HAVE_KVM_EVENTFD

config KVM_APIC_ARCHITECTURE
bool
+
+config KVM_MMIO
+ bool
--
1.6.5.3

Avi Kivity
2010-02-10 17:22:40 UTC
Permalink
From: Sheng Yang <***@linux.intel.com>

shared_msr_global saves the host values of the relevant MSRs, but it assumes
that all the MSRs it tracks have the same value on every CPU. That is not true
for some MSRs, e.g. MSR_TSC_AUX.

Extend it to be per-CPU so that MSR_TSC_AUX and similar MSRs can be supported.

Note that shared_msr_global still makes one assumption: it can only deal with
MSRs whose host values do not change after the KVM module has been loaded.

Signed-off-by: Sheng Yang <***@linux.intel.com>
Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/kvm/x86.c | 55 +++++++++++++++++++++++++++++++--------------------
1 files changed, 33 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fd67f88..a2c8cf2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -93,16 +93,16 @@ module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);

struct kvm_shared_msrs_global {
int nr;
- struct kvm_shared_msr {
- u32 msr;
- u64 value;
- } msrs[KVM_NR_SHARED_MSRS];
+ u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
struct user_return_notifier urn;
bool registered;
- u64 current_value[KVM_NR_SHARED_MSRS];
+ struct kvm_shared_msr_values {
+ u64 host;
+ u64 curr;
+ } values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
@@ -147,53 +147,64 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
static void kvm_on_user_return(struct user_return_notifier *urn)
{
unsigned slot;
- struct kvm_shared_msr *global;
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
+ struct kvm_shared_msr_values *values;

for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
- global = &shared_msrs_global.msrs[slot];
- if (global->value != locals->current_value[slot]) {
- wrmsrl(global->msr, global->value);
- locals->current_value[slot] = global->value;
+ values = &locals->values[slot];
+ if (values->host != values->curr) {
+ wrmsrl(shared_msrs_global.msrs[slot], values->host);
+ values->curr = values->host;
}
}
locals->registered = false;
user_return_notifier_unregister(urn);
}

-void kvm_define_shared_msr(unsigned slot, u32 msr)
+static void shared_msr_update(unsigned slot, u32 msr)
{
- int cpu;
+ struct kvm_shared_msrs *smsr;
u64 value;

+ smsr = &__get_cpu_var(shared_msrs);
+ /* only read, and nobody should modify it at this time,
+ * so don't need lock */
+ if (slot >= shared_msrs_global.nr) {
+ printk(KERN_ERR "kvm: invalid MSR slot!");
+ return;
+ }
+ rdmsrl_safe(msr, &value);
+ smsr->values[slot].host = value;
+ smsr->values[slot].curr = value;
+}
+
+void kvm_define_shared_msr(unsigned slot, u32 msr)
+{
if (slot >= shared_msrs_global.nr)
shared_msrs_global.nr = slot + 1;
- shared_msrs_global.msrs[slot].msr = msr;
- rdmsrl_safe(msr, &value);
- shared_msrs_global.msrs[slot].value = value;
- for_each_online_cpu(cpu)
- per_cpu(shared_msrs, cpu).current_value[slot] = value;
+ shared_msrs_global.msrs[slot] = msr;
+ /* we need ensured the shared_msr_global have been updated */
+ smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
unsigned i;
- struct kvm_shared_msrs *locals = &__get_cpu_var(shared_msrs);

for (i = 0; i < shared_msrs_global.nr; ++i)
- locals->current_value[i] = shared_msrs_global.msrs[i].value;
+ shared_msr_update(i, shared_msrs_global.msrs[i]);
}

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

- if (((value ^ smsr->current_value[slot]) & mask) == 0)
+ if (((value ^ smsr->values[slot].curr) & mask) == 0)
return;
- smsr->current_value[slot] = value;
- wrmsrl(shared_msrs_global.msrs[slot].msr, value);
+ smsr->values[slot].curr = value;
+ wrmsrl(shared_msrs_global.msrs[slot], value);
if (!smsr->registered) {
smsr->urn.on_user_return = kvm_on_user_return;
user_return_notifier_register(&smsr->urn);
--
1.6.5.3
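
To illustrate how a backend consumes this interface (a sketch only; the slot
number and the MSR chosen are examples, not taken from this patch): an MSR is
declared once at module init with kvm_define_shared_msr(), and switched to the
guest value before guest entry with kvm_set_shared_msr(); the user return
notifier then restores the host value lazily.

/* Sketch: declare slot 0 for MSR_TSC_AUX once, at module init time. */
static void example_define_shared_msrs(void)
{
	kvm_define_shared_msr(0, MSR_TSC_AUX);
}

/* Sketch: before entering the guest, load the guest's TSC_AUX value;
 * kvm_on_user_return() later writes the saved host value back. */
static void example_load_guest_tsc_aux(u64 guest_tsc_aux)
{
	kvm_set_shared_msr(0, guest_tsc_aux, -1ull);
}
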
Avi Kivity
2010-02-10 17:22:35 UTC
Permalink
Some bits of cr4 can be owned by the guest on vmx, so when we read them,
we copy them to the vcpu structure. In preparation for making the set of
guest-owned bits dynamic, use helpers to access these bits so we don't need
to know where the bit resides.

No changes to svm since all bits are host-owned there.

Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/kvm_cache_regs.h | 12 ++++++++++++
arch/x86/kvm/mmu.h | 5 +++--
arch/x86/kvm/vmx.c | 13 ++++++++-----
arch/x86/kvm/x86.c | 16 ++++++----------
5 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index da6dee8..e9f4f12 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -272,6 +272,7 @@ struct kvm_vcpu_arch {
unsigned long cr2;
unsigned long cr3;
unsigned long cr4;
+ unsigned long cr4_guest_owned_bits;
unsigned long cr8;
u32 hflags;
u64 pdptrs[4]; /* pae */
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 7bcc5b6..35acc36 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -38,4 +38,16 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
return vcpu->arch.pdptrs[index];
}

+static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
+{
+ if (mask & vcpu->arch.cr4_guest_owned_bits)
+ kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+ return vcpu->arch.cr4 & mask;
+}
+
+static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
+{
+ return kvm_read_cr4_bits(vcpu, ~0UL);
+}
+
#endif
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 61a1b38..4567d80 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -2,6 +2,7 @@
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
@@ -64,12 +65,12 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)

static inline int is_pae(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cr4 & X86_CR4_PAE;
+ return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cr4 & X86_CR4_PSE;
+ return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index efbb614..284e905 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1615,8 +1615,10 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
- vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
- vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+ ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
+
+ vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
+ vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
}

static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -1661,7 +1663,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
(CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING));
vcpu->arch.cr0 = cr0;
- vmx_set_cr4(vcpu, vcpu->arch.cr4);
+ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
} else if (!is_paging(vcpu)) {
/* From nonpaging to paging */
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1669,7 +1671,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
~(CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING));
vcpu->arch.cr0 = cr0;
- vmx_set_cr4(vcpu, vcpu->arch.cr4);
+ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
}

if (!(cr0 & X86_CR0_WP))
@@ -2420,6 +2422,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)

vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
+ vmx->vcpu.arch.cr4_guest_owned_bits = ~KVM_GUEST_CR4_MASK;

tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
rdtscll(tsc_this);
@@ -3050,7 +3053,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
vcpu->arch.eff_db[dr] = val;
break;
case 4 ... 5:
- if (vcpu->arch.cr4 & X86_CR4_DE)
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
kvm_queue_exception(vcpu, UD_VECTOR);
break;
case 6:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 47fab87..fd67f88 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -482,7 +482,7 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
- unsigned long old_cr4 = vcpu->arch.cr4;
+ unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

if (cr4 & CR4_RESERVED_BITS) {
@@ -1900,7 +1900,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
return 0;
if (mce->status & MCI_STATUS_UC) {
if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
- !(vcpu->arch.cr4 & X86_CR4_MCE)) {
+ !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
printk(KERN_DEBUG "kvm: set_mce: "
"injects mce exception while "
"previous one is in progress!\n");
@@ -3617,7 +3617,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
unsigned long value;

- kvm_x86_ops->decache_cr4_guest_bits(vcpu);
switch (cr) {
case 0:
value = vcpu->arch.cr0;
@@ -3629,7 +3628,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
value = vcpu->arch.cr3;
break;
case 4:
- value = vcpu->arch.cr4;
+ value = kvm_read_cr4(vcpu);
break;
case 8:
value = kvm_get_cr8(vcpu);
@@ -3657,7 +3656,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
kvm_set_cr3(vcpu, val);
break;
case 4:
- kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
+ kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
break;
case 8:
kvm_set_cr8(vcpu, val & 0xfUL);
@@ -4238,11 +4237,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
sregs->gdt.limit = dt.limit;
sregs->gdt.base = dt.base;

- kvm_x86_ops->decache_cr4_guest_bits(vcpu);
sregs->cr0 = vcpu->arch.cr0;
sregs->cr2 = vcpu->arch.cr2;
sregs->cr3 = vcpu->arch.cr3;
- sregs->cr4 = vcpu->arch.cr4;
+ sregs->cr4 = kvm_read_cr4(vcpu);
sregs->cr8 = kvm_get_cr8(vcpu);
sregs->efer = vcpu->arch.shadow_efer;
sregs->apic_base = kvm_get_apic_base(vcpu);
@@ -4738,13 +4736,11 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
kvm_x86_ops->set_efer(vcpu, sregs->efer);
kvm_set_apic_base(vcpu, sregs->apic_base);

- kvm_x86_ops->decache_cr4_guest_bits(vcpu);
-
mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;

- mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
+ mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
if (!is_long_mode(vcpu) && is_pae(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.cr3);
--
1.6.5.3

Avi Kivity
2010-02-10 17:22:46 UTC
Permalink
Signed-off-by: Avi Kivity <***@redhat.com>
---
virt/kvm/coalesced_mmio.h | 5 +++++
1 files changed, 5 insertions(+), 0 deletions(-)

diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 4b49f27..e703346 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -1,3 +1,6 @@
+#ifndef __KVM_COALESCED_MMIO_H__
+#define __KVM_COALESCED_MMIO_H__
+
/*
* KVM coalesced MMIO
*
@@ -22,3 +25,5 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);
+
+#endif
--
1.6.5.3
Avi Kivity
2010-02-10 17:22:39 UTC
Permalink
From: Sheng Yang <***@linux.intel.com>

It's no longer necessary.

Signed-off-by: Sheng Yang <***@linux.intel.com>
Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/kvm/vmx.c | 2 --
1 files changed, 0 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 694baed..f8f2fdc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2399,14 +2399,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
for (i = 0; i < NR_VMX_MSR; ++i) {
u32 index = vmx_msr_index[i];
u32 data_low, data_high;
- u64 data;
int j = vmx->nmsrs;

if (rdmsr_safe(index, &data_low, &data_high) < 0)
continue;
if (wrmsr_safe(index, data_low, data_high) < 0)
continue;
- data = data_low | ((u64)data_high << 32);
vmx->guest_msrs[j].index = i;
vmx->guest_msrs[j].data = 0;
vmx->guest_msrs[j].mask = -1ull;
--
1.6.5.3

Avi Kivity
2010-02-10 17:22:44 UTC
Permalink
From: Joerg Roedel <***@amd.com>

The tsc_offset adjustment in svm_vcpu_load is executed
unconditionally, even if Linux considers the host tsc stable.
This causes a Linux guest to detect an unstable tsc in any
case.
This patch skips the tsc_offset adjustment when the host tsc
is stable, so the guest now gets the benefit of a stable
tsc too.

Signed-off-by: Joerg Roedel <***@amd.com>
Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/kvm/svm.c | 18 ++++++++++--------
1 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7f4e225..b373ae6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -765,14 +765,16 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (unlikely(cpu != vcpu->cpu)) {
u64 delta;

- /*
- * Make sure that the guest sees a monotonically
- * increasing TSC.
- */
- delta = vcpu->arch.host_tsc - native_read_tsc();
- svm->vmcb->control.tsc_offset += delta;
- if (is_nested(svm))
- svm->nested.hsave->control.tsc_offset += delta;
+ if (check_tsc_unstable()) {
+ /*
+ * Make sure that the guest sees a monotonically
+ * increasing TSC.
+ */
+ delta = vcpu->arch.host_tsc - native_read_tsc();
+ svm->vmcb->control.tsc_offset += delta;
+ if (is_nested(svm))
+ svm->nested.hsave->control.tsc_offset += delta;
+ }
vcpu->cpu = cpu;
kvm_migrate_timers(vcpu);
svm->asid_generation = 0;
--
1.6.5.3
Avi Kivity
2010-02-10 17:22:42 UTC
Permalink
From: Sheng Yang <***@linux.intel.com>

Sometimes we need to adjust some state to reflect the guest CPUID
setting, e.g. if we don't expose rdtscp to the guest, we don't want to enable
it in hardware. cpuid_update() is introduced for this purpose.

Also export kvm_find_cpuid_entry() for later use.

Signed-off-by: Sheng Yang <***@linux.intel.com>
Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/svm.c | 6 ++++++
arch/x86/kvm/vmx.c | 6 ++++++
arch/x86/kvm/x86.c | 3 +++
4 files changed, 16 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e9f4f12..7ff0ea3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -459,6 +459,7 @@ struct kvm_x86_ops {
int (*hardware_setup)(void); /* __init */
void (*hardware_unsetup)(void); /* __exit */
bool (*cpu_has_accelerated_tpr)(void);
+ void (*cpuid_update)(struct kvm_vcpu *vcpu);

/* Create, but do not attach this VCPU */
struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1d9b338..41777e6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2852,6 +2852,10 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
return 0;
}

+static void svm_cpuid_update(struct kvm_vcpu *vcpu)
+{
+}
+
static const struct trace_print_flags svm_exit_reasons_str[] = {
{ SVM_EXIT_READ_CR0, "read_cr0" },
{ SVM_EXIT_READ_CR3, "read_cr3" },
@@ -2976,6 +2980,8 @@ static struct kvm_x86_ops svm_x86_ops = {

.exit_reasons_str = svm_exit_reasons_str,
.gb_page_enable = svm_gb_page_enable,
+
+ .cpuid_update = svm_cpuid_update,
};

static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f8f2fdc..75e8931 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3988,6 +3988,10 @@ static bool vmx_gb_page_enable(void)
return false;
}

+static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
+{
+}
+
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -4052,6 +4056,8 @@ static struct kvm_x86_ops vmx_x86_ops = {

.exit_reasons_str = vmx_exit_reasons_str,
.gb_page_enable = vmx_gb_page_enable,
+
+ .cpuid_update = vmx_cpuid_update,
};

static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a2c8cf2..153f4a1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1575,6 +1575,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
cpuid_fix_nx_cap(vcpu);
r = 0;
kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);

out_free:
vfree(cpuid_entries);
@@ -1597,6 +1598,7 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
goto out;
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);
return 0;

out:
@@ -3734,6 +3736,7 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
}
return best;
}
+EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
--
1.6.5.3
Avi Kivity
2010-02-10 17:22:37 UTC
Permalink
We make no use of cr4.pge if ept is enabled, but the guest does (to flush
global mappings, as with vmap()), so give the guest ownership of this bit.

Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/kvm/vmx.c | 2 ++
1 files changed, 2 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 755811a..a7ebaa5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2424,6 +2424,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)

vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
+ if (enable_ept)
+ vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);

tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
--
1.6.5.3
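
For context, a guest-side sketch (illustrative only, not part of the patch) of
why guests write cr4.pge in the first place: toggling the bit flushes global
TLB entries, and with the bit guest-owned under EPT such flushes no longer
cause vmexits.

/* Clearing and restoring CR4.PGE invalidates global TLB entries. */
static inline void flush_tlb_global_sketch(void)
{
	unsigned long cr4;

	asm volatile("mov %%cr4, %0" : "=r" (cr4));
	asm volatile("mov %0, %%cr4" : : "r" (cr4 & ~X86_CR4_PGE));
	asm volatile("mov %0, %%cr4" : : "r" (cr4));
}
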
Avi Kivity
2010-02-10 17:22:34 UTC
Permalink
They have no place in common code.

Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 13 -------------
arch/x86/kvm/vmx.c | 13 +++++++++++++
2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4f865e8..da6dee8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -38,19 +38,6 @@
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
0xFFFFFF0000000000ULL)

-#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
- (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
-#define KVM_GUEST_CR0_MASK \
- (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
-#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
- (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
-#define KVM_VM_CR0_ALWAYS_ON \
- (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
-#define KVM_GUEST_CR4_MASK \
- (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
-#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
-#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
-
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8a8e139..efbb614 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -61,6 +61,19 @@ module_param_named(unrestricted_guest,
static int __read_mostly emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

+#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
+ (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
+#define KVM_GUEST_CR0_MASK \
+ (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
+ (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
+#define KVM_VM_CR0_ALWAYS_ON \
+ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_GUEST_CR4_MASK \
+ (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
+#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
+#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
+
/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
--
1.6.5.3
Avi Kivity
2010-02-10 17:22:38 UTC
Permalink
ept_update_paging_mode_cr4() accesses vcpu->arch.cr4 directly, which usually
needs to be accessed via kvm_read_cr4(). In this case, we can't, since cr4
is in the process of being updated. Instead of adding inane comments, fold
the function into its caller (vmx_set_cr4), so it can use the not-yet-committed
cr4 directly.

Signed-off-by: Avi Kivity <***@redhat.com>
---
arch/x86/kvm/vmx.c | 20 ++++++++------------
1 files changed, 8 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a7ebaa5..694baed 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1680,16 +1680,6 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
*hw_cr0 &= ~X86_CR0_WP;
}

-static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
- struct kvm_vcpu *vcpu)
-{
- if (!is_paging(vcpu)) {
- *hw_cr4 &= ~X86_CR4_PAE;
- *hw_cr4 |= X86_CR4_PSE;
- } else if (!(vcpu->arch.cr4 & X86_CR4_PAE))
- *hw_cr4 &= ~X86_CR4_PAE;
-}
-
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -1767,8 +1757,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);

vcpu->arch.cr4 = cr4;
- if (enable_ept)
- ept_update_paging_mode_cr4(&hw_cr4, vcpu);
+ if (enable_ept) {
+ if (!is_paging(vcpu)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ hw_cr4 |= X86_CR4_PSE;
+ } else if (!(cr4 & X86_CR4_PAE)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ }
+ }

vmcs_writel(CR4_READ_SHADOW, cr4);
vmcs_writel(GUEST_CR4, hw_cr4);
--
1.6.5.3
