Diffstat (limited to 'tools/testing/selftests/kvm/lib')
-rw-r--r-- | tools/testing/selftests/kvm/lib/kvm_util.c          |   1
-rw-r--r-- | tools/testing/selftests/kvm/lib/perf_test_util.c    |  31
-rw-r--r-- | tools/testing/selftests/kvm/lib/test_util.c         |  31
-rw-r--r-- | tools/testing/selftests/kvm/lib/x86_64/processor.c  | 144
-rw-r--r-- | tools/testing/selftests/kvm/lib/x86_64/svm.c        |   8
5 files changed, 200 insertions, 15 deletions
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index fa5a90e6c6f0..d787cb802b4a 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1801,6 +1801,7 @@ static struct exit_reason {
 	{KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
 	{KVM_EXIT_X86_RDMSR, "RDMSR"},
 	{KVM_EXIT_X86_WRMSR, "WRMSR"},
+	{KVM_EXIT_XEN, "XEN"},
 #ifdef KVM_EXIT_MEMORY_NOT_PRESENT
 	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
 #endif
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index 9be1944c2d1c..81490b9b4e32 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -49,7 +49,8 @@ static void guest_code(uint32_t vcpu_id)
 }
 
 struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
-				   uint64_t vcpu_memory_bytes)
+				   uint64_t vcpu_memory_bytes,
+				   enum vm_mem_backing_src_type backing_src)
 {
 	struct kvm_vm *vm;
 	uint64_t guest_num_pages;
@@ -93,8 +94,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 	pr_info("guest physical test memory offset: 0x%lx\n",
 		guest_test_phys_mem);
 
 	/* Add an extra memory slot for testing */
-	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
-				    guest_test_phys_mem,
+	vm_userspace_mem_region_add(vm, backing_src, guest_test_phys_mem,
 				    PERF_TEST_MEM_SLOT_INDEX,
 				    guest_num_pages, 0);
@@ -112,7 +112,9 @@ void perf_test_destroy_vm(struct kvm_vm *vm)
 	kvm_vm_free(vm);
 }
 
-void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
+			   uint64_t vcpu_memory_bytes,
+			   bool partition_vcpu_memory_access)
 {
 	vm_paddr_t vcpu_gpa;
 	struct perf_test_vcpu_args *vcpu_args;
@@ -122,13 +124,22 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_by
 		vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
 
 		vcpu_args->vcpu_id = vcpu_id;
-		vcpu_args->gva = guest_test_virt_mem +
-				 (vcpu_id * vcpu_memory_bytes);
-		vcpu_args->pages = vcpu_memory_bytes /
-				   perf_test_args.guest_page_size;
+		if (partition_vcpu_memory_access) {
+			vcpu_args->gva = guest_test_virt_mem +
+					 (vcpu_id * vcpu_memory_bytes);
+			vcpu_args->pages = vcpu_memory_bytes /
+					   perf_test_args.guest_page_size;
+			vcpu_gpa = guest_test_phys_mem +
+				   (vcpu_id * vcpu_memory_bytes);
+		} else {
+			vcpu_args->gva = guest_test_virt_mem;
+			vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
+					   perf_test_args.guest_page_size;
+			vcpu_gpa = guest_test_phys_mem;
+		}
 
-		vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
 		pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
-			 vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
+			 vcpu_id, vcpu_gpa, vcpu_gpa +
+			 (vcpu_args->pages * perf_test_args.guest_page_size));
 	}
 }
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 8e04c0b1608e..906c955384e2 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -10,6 +10,7 @@
 #include <limits.h>
 #include <stdlib.h>
 #include <time.h>
+#include "linux/kernel.h"
 
 #include "test_util.h"
 
@@ -84,7 +85,7 @@ struct timespec timespec_sub(struct timespec ts1, struct timespec ts2)
 	return timespec_add_ns((struct timespec){0}, ns1 - ns2);
 }
 
-struct timespec timespec_diff_now(struct timespec start)
+struct timespec timespec_elapsed(struct timespec start)
 {
 	struct timespec end;
 
@@ -109,3 +110,31 @@ void print_skip(const char *fmt, ...)
 	va_end(ap);
 	puts(", skipping test");
 }
+
+const struct vm_mem_backing_src_alias backing_src_aliases[] = {
+	{"anonymous", VM_MEM_SRC_ANONYMOUS,},
+	{"anonymous_thp", VM_MEM_SRC_ANONYMOUS_THP,},
+	{"anonymous_hugetlb", VM_MEM_SRC_ANONYMOUS_HUGETLB,},
+};
+
+void backing_src_help(void)
+{
+	int i;
+
+	printf("Available backing src types:\n");
+	for (i = 0; i < ARRAY_SIZE(backing_src_aliases); i++)
+		printf("\t%s\n", backing_src_aliases[i].name);
+}
+
+enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(backing_src_aliases); i++)
+		if (!strcmp(type_name, backing_src_aliases[i].name))
+			return backing_src_aliases[i].type;
+
+	backing_src_help();
+	TEST_FAIL("Unknown backing src type: %s", type_name);
+	return -1;
+}
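For orientation, below is a minimal sketch of how a perf test might wire the extended API together, feeding parse_backing_src_type() from the test_util.c hunk above into perf_test_create_vm(). The run_test() wrapper, its parameters, and the exact header names are illustrative assumptions based on the surrounding selftests, not part of this diff.

/* Illustrative caller only; assumes the selftests headers declare the
 * helpers exactly as defined in the hunks above. */
#include "test_util.h"
#include "kvm_util.h"
#include "perf_test_util.h"

static void run_test(enum vm_guest_mode mode, int nr_vcpus,
                     uint64_t vcpu_memory_bytes, const char *src_name,
                     bool partition_vcpu_memory_access)
{
        struct kvm_vm *vm;
        enum vm_mem_backing_src_type backing_src;

        /* Unknown names print backing_src_help() and fail the test. */
        backing_src = parse_backing_src_type(src_name);

        /* Backing source is now caller-chosen rather than hard-coded
         * to VM_MEM_SRC_ANONYMOUS. */
        vm = perf_test_create_vm(mode, nr_vcpus, vcpu_memory_bytes,
                                 backing_src);

        /* With partitioning disabled, every vCPU is pointed at the whole
         * nr_vcpus * vcpu_memory_bytes region instead of its own slice. */
        perf_test_setup_vcpus(vm, nr_vcpus, vcpu_memory_bytes,
                              partition_vcpu_memory_access);

        /* ... spawn vCPU threads and take measurements here ... */

        perf_test_destroy_vm(vm);
}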
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 95e1a757c629..de0c76177d02 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -670,6 +670,82 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
 }
 
 /*
+ * KVM Get MSR
+ *
+ * Input Args:
+ *   msr_index - Index of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
+ *
+ * Get value of MSR for VCPU.
+ */
+uint64_t kvm_get_feature_msr(uint64_t msr_index)
+{
+	struct {
+		struct kvm_msrs header;
+		struct kvm_msr_entry entry;
+	} buffer = {};
+	int r, kvm_fd;
+
+	buffer.header.nmsrs = 1;
+	buffer.entry.index = msr_index;
+	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
+	if (kvm_fd < 0)
+		exit(KSFT_SKIP);
+
+	r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
+	TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
+		"  rc: %i errno: %i", r, errno);
+
+	close(kvm_fd);
+	return buffer.entry.data;
+}
+
+/*
+ * VM VCPU CPUID Set
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   vcpuid - VCPU id
+ *
+ * Output Args: None
+ *
+ * Return: KVM CPUID (KVM_GET_CPUID2)
+ *
+ * Set the VCPU's CPUID.
+ */
+struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+{
+	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+	struct kvm_cpuid2 *cpuid;
+	int rc, max_ent;
+
+	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+	cpuid = allocate_kvm_cpuid2();
+	max_ent = cpuid->nent;
+
+	for (cpuid->nent = 1; cpuid->nent <= max_ent; cpuid->nent++) {
+		rc = ioctl(vcpu->fd, KVM_GET_CPUID2, cpuid);
+		if (!rc)
+			break;
+
+		TEST_ASSERT(rc == -1 && errno == E2BIG,
+			    "KVM_GET_CPUID2 should either succeed or give E2BIG: %d %d",
+			    rc, errno);
+	}
+
+	TEST_ASSERT(rc == 0, "KVM_GET_CPUID2 failed, rc: %i errno: %i",
+		    rc, errno);
+
+	return cpuid;
+}
+
+
+
+/*
  * Locate a cpuid entry.
  *
  * Input Args:
@@ -1224,3 +1300,71 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
 		     : "b"(a0), "c"(a1), "d"(a2), "S"(a3));
 	return r;
 }
+
+struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
+{
+	static struct kvm_cpuid2 *cpuid;
+	int ret;
+	int kvm_fd;
+
+	if (cpuid)
+		return cpuid;
+
+	cpuid = allocate_kvm_cpuid2();
+	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
+	if (kvm_fd < 0)
+		exit(KSFT_SKIP);
+
+	ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+	TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n",
+		    ret, errno);
+
+	close(kvm_fd);
+	return cpuid;
+}
+
+void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+{
+	static struct kvm_cpuid2 *cpuid_full;
+	struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
+	int i, nent = 0;
+
+	if (!cpuid_full) {
+		cpuid_sys = kvm_get_supported_cpuid();
+		cpuid_hv = kvm_get_supported_hv_cpuid();
+
+		cpuid_full = malloc(sizeof(*cpuid_full) +
+				    (cpuid_sys->nent + cpuid_hv->nent) *
+				    sizeof(struct kvm_cpuid_entry2));
+		if (!cpuid_full) {
+			perror("malloc");
+			abort();
+		}
+
+		/* Need to skip KVM CPUID leaves 0x400000xx */
+		for (i = 0; i < cpuid_sys->nent; i++) {
+			if (cpuid_sys->entries[i].function >= 0x40000000 &&
+			    cpuid_sys->entries[i].function < 0x40000100)
+				continue;
+			cpuid_full->entries[nent] = cpuid_sys->entries[i];
+			nent++;
+		}
+
+		memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
+		       cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
+		cpuid_full->nent = nent + cpuid_hv->nent;
+	}
+
+	vcpu_set_cpuid(vm, vcpuid, cpuid_full);
+}
+
+struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+{
+	static struct kvm_cpuid2 *cpuid;
+
+	cpuid = allocate_kvm_cpuid2();
+
+	vcpu_ioctl(vm, vcpuid, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+
+	return cpuid;
+}
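As a usage note for the new processor.c helpers, here is a short sketch of the intended call pattern. The check_cpuid_and_msrs() name, the choice of MSR_IA32_UCODE_REV, and the assumption that it is in KVM's feature-MSR list are illustrative only and not taken from this diff.

/* Illustrative only; assumes vm/vcpuid were created by the usual
 * selftests setup (e.g. vm_create_default()). */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

static void check_cpuid_and_msrs(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct kvm_cpuid2 *cpuid;
        uint64_t ucode_rev;

        /* KVM_GET_MSRS against /dev/kvm, i.e. a feature MSR rather than
         * per-vCPU state. */
        ucode_rev = kvm_get_feature_msr(MSR_IA32_UCODE_REV);
        pr_info("reported microcode revision: 0x%lx\n", ucode_rev);

        /* Merge the regular leaves (minus the 0x400000xx KVM range) with
         * the Hyper-V ones and install the result on the vCPU... */
        vcpu_set_hv_cpuid(vm, vcpuid);

        /* ...then read back what the vCPU now reports. */
        cpuid = vcpu_get_cpuid(vm, vcpuid);
        TEST_ASSERT(cpuid->nent > 0, "expected a non-empty CPUID");
}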
diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c
index 3a5c72ed2b79..827fe6028dd4 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c
@@ -74,7 +74,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r
 	wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa);
 
 	memset(vmcb, 0, sizeof(*vmcb));
-	asm volatile ("vmsave\n\t" : : "a" (vmcb_gpa) : "memory");
+	asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory");
 	vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr);
 	vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr);
 	vmcb_set_seg(&save->ss, get_ss(), 0, -1U, data_seg_attr);
@@ -131,19 +131,19 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r
 void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
 {
 	asm volatile (
-		"vmload\n\t"
+		"vmload %[vmcb_gpa]\n\t"
 		"mov rflags, %%r15\n\t"	// rflags
 		"mov %%r15, 0x170(%[vmcb])\n\t"
 		"mov guest_regs, %%r15\n\t"	// rax
 		"mov %%r15, 0x1f8(%[vmcb])\n\t"
 		LOAD_GPR_C
-		"vmrun\n\t"
+		"vmrun %[vmcb_gpa]\n\t"
 		SAVE_GPR_C
 		"mov 0x170(%[vmcb]), %%r15\n\t"	// rflags
 		"mov %%r15, rflags\n\t"
 		"mov 0x1f8(%[vmcb]), %%r15\n\t"	// rax
 		"mov %%r15, guest_regs\n\t"
-		"vmsave\n\t"
+		"vmsave %[vmcb_gpa]\n\t"
 		: : [vmcb] "r" (vmcb), [vmcb_gpa] "a" (vmcb_gpa)
 		: "r15", "memory");
 }
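Finally, a sketch of the guest-side pattern the svm.c helpers serve, to show where the now-explicit vmcb_gpa (RAX) operand comes from. The l2_guest_code payload and the L1/L2 split are assumptions modeled on the existing nested SVM tests, not part of this diff.

#include "svm_util.h"

#define L2_GUEST_STACK_SIZE 64

/* Hypothetical L2 payload; a real test would exit back to L1 or L0 here. */
static void l2_guest_code(void)
{
}

static void l1_guest_code(struct svm_test_data *svm)
{
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

        /* Fills the VMCB save area; internally issues VMSAVE with the
         * VMCB's GPA in RAX, now written as an explicit operand. */
        generic_svm_setup(svm, l2_guest_code,
                          &l2_guest_stack[L2_GUEST_STACK_SIZE]);

        /* VMLOAD/VMRUN/VMSAVE in run_guest() all take the same operand. */
        run_guest(svm->vmcb, svm->vmcb_gpa);
}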