| author | Ingo Molnar <mingo@kernel.org> | 2016-11-24 05:09:08 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2016-11-24 05:09:08 +0100 |
| commit | 69e6cdd0cf16f645be39038e5ccc9379e3923d00 (patch) | |
| tree | 3edb32b07ef37be1536f6510ec8e7caaf5acbbb8 /virt/kvm/async_pf.c | |
| parent | 6a6b12e2125591e24891e6860410795ea53aed11 (diff) | |
| parent | 10b9dd56860e93f11cd352e8c75a33357b80b70b (diff) | |
Merge branch 'linus' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'virt/kvm/async_pf.c')
-rw-r--r-- | virt/kvm/async_pf.c | 16
1 file changed, 14 insertions, 2 deletions
```diff
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index db9668869f6f..efeceb0a222d 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -84,12 +84,14 @@ static void async_pf_execute(struct work_struct *work)
 	 * mm and might be done in another context, so we must
 	 * use FOLL_REMOTE.
 	 */
-	__get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE);
+	__get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
+				  FOLL_WRITE | FOLL_REMOTE);
 
 	kvm_async_page_present_sync(vcpu, apf);
 
 	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
+	apf->vcpu = NULL;
 	spin_unlock(&vcpu->async_pf.lock);
 
 	/*
@@ -112,6 +114,8 @@ static void async_pf_execute(struct work_struct *work)
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 {
+	spin_lock(&vcpu->async_pf.lock);
+
 	/* cancel outstanding work queue item */
 	while (!list_empty(&vcpu->async_pf.queue)) {
 		struct kvm_async_pf *work =
@@ -119,6 +123,14 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 					 typeof(*work), queue);
 		list_del(&work->queue);
 
+		/*
+		 * We know it's present in vcpu->async_pf.done, do
+		 * nothing here.
+		 */
+		if (!work->vcpu)
+			continue;
+
+		spin_unlock(&vcpu->async_pf.lock);
 #ifdef CONFIG_KVM_ASYNC_PF_SYNC
 		flush_work(&work->work);
 #else
@@ -128,9 +140,9 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 			kmem_cache_free(async_pf_cache, work);
 		}
 #endif
+		spin_lock(&vcpu->async_pf.lock);
 	}
 
-	spin_lock(&vcpu->async_pf.lock);
 	while (!list_empty(&vcpu->async_pf.done)) {
 		struct kvm_async_pf *work =
 			list_first_entry(&vcpu->async_pf.done,
```
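The interesting part of the hunks above is the locking shape in kvm_clear_async_pf_completion_queue(): the queue is now walked with async_pf.lock held, entries that async_pf_execute() has already marked done (work->vcpu == NULL) are skipped, and the lock is dropped around the blocking flush_work()/cancel_work_sync() and re-taken before the walk continues. As a rough illustration of that pattern only, here is a minimal user-space sketch using POSIX threads; struct pf_work, worker(), drain_queue(), and queue_lock are hypothetical stand-ins and not anything from the kernel tree.

```c
/*
 * Standalone sketch of the pattern above, NOT the KVM code: a drain
 * path walks a pending list under a lock and, for entries whose
 * worker has not finished, drops the lock around the blocking wait,
 * much like the patch drops async_pf.lock around flush_work().
 * All names here are made up.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct pf_work {
	struct pf_work *next;
	pthread_t thread;	/* stands in for the kernel work item     */
	int completed;		/* plays the role of "work->vcpu == NULL" */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pf_work *pending;	/* plays the role of vcpu->async_pf.queue */

/* Worker: do the "fault", then mark completion under the queue lock. */
static void *worker(void *arg)
{
	struct pf_work *w = arg;

	usleep(1000);			/* pretend to fault the page in */

	pthread_mutex_lock(&queue_lock);
	w->completed = 1;		/* analogous to apf->vcpu = NULL */
	pthread_mutex_unlock(&queue_lock);
	return NULL;
}

/* Drain: mirrors the structure of kvm_clear_async_pf_completion_queue(). */
static void drain_queue(void)
{
	pthread_mutex_lock(&queue_lock);

	while (pending) {
		struct pf_work *w = pending;

		pending = w->next;	/* like list_del(&work->queue) */

		if (w->completed) {
			/* Worker finished and never touches the lock or *w again. */
			pthread_detach(w->thread);
			free(w);
			continue;
		}

		/*
		 * Still running: waiting would block, and the worker needs
		 * queue_lock to finish, so release the lock for the wait and
		 * re-take it afterwards -- the same shape as the
		 * spin_unlock()/flush_work()/spin_lock() sequence above.
		 */
		pthread_mutex_unlock(&queue_lock);
		pthread_join(w->thread, NULL);
		pthread_mutex_lock(&queue_lock);
		free(w);
	}

	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct pf_work *w = calloc(1, sizeof(*w));

		w->next = pending;
		pending = w;
		pthread_create(&w->thread, NULL, worker, w);
	}

	drain_queue();
	puts("queue drained without blocking under the lock");
	return 0;
}
```

The point of the shape is the same in both places: the waiter must not hold the lock that the work item itself needs in order to complete, so the lock is released for the duration of the wait and re-acquired before the list walk continues.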