Skip to content

Commit 9736783

Browse files
author
Maxim Levitsky
committed
x86: KVM: SVM: use kvm_lock_all_vcpus instead of a custom implementation
JIRA: https://issues.redhat.com/browse/RHEL-74410 commit c560bc9 Author: Maxim Levitsky <mlevitsk@redhat.com> Date: Mon May 12 14:04:05 2025 -0400 x86: KVM: SVM: use kvm_lock_all_vcpus instead of a custom implementation Use kvm_lock_all_vcpus instead of sev's own implementation. Because kvm_lock_all_vcpus uses the _nest_lock feature of lockdep, which ignores subclasses, there is no longer a need to use separate subclasses for source and target VMs. No functional change intended. Suggested-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Message-ID: <20250512180407.659015-5-mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
1 parent e6353d3 commit 9736783

File tree

1 file changed

+4
-68
lines changed

1 file changed

+4
-68
lines changed

arch/x86/kvm/svm/sev.c

Lines changed: 4 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -1887,70 +1887,6 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 	atomic_set_release(&src_sev->migration_in_progress, 0);
 }
 
-/* vCPU mutex subclasses. */
-enum sev_migration_role {
-	SEV_MIGRATION_SOURCE = 0,
-	SEV_MIGRATION_TARGET,
-	SEV_NR_MIGRATION_ROLES,
-};
-
-static int sev_lock_vcpus_for_migration(struct kvm *kvm,
-					enum sev_migration_role role)
-{
-	struct kvm_vcpu *vcpu;
-	unsigned long i, j;
-
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (mutex_lock_killable_nested(&vcpu->mutex, role))
-			goto out_unlock;
-
-#ifdef CONFIG_PROVE_LOCKING
-		if (!i)
-			/*
-			 * Reset the role to one that avoids colliding with
-			 * the role used for the first vcpu mutex.
-			 */
-			role = SEV_NR_MIGRATION_ROLES;
-		else
-			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-#endif
-	}
-
-	return 0;
-
-out_unlock:
-
-	kvm_for_each_vcpu(j, vcpu, kvm) {
-		if (i == j)
-			break;
-
-#ifdef CONFIG_PROVE_LOCKING
-		if (j)
-			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-#endif
-
-		mutex_unlock(&vcpu->mutex);
-	}
-	return -EINTR;
-}
-
-static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
-{
-	struct kvm_vcpu *vcpu;
-	unsigned long i;
-	bool first = true;
-
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (first)
-			first = false;
-		else
-			mutex_acquire(&vcpu->mutex.dep_map,
-				      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
-
-		mutex_unlock(&vcpu->mutex);
-	}
-}
-
 static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
 	struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
@@ -2090,10 +2026,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 		charged = true;
 	}
 
-	ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
+	ret = kvm_lock_all_vcpus(kvm);
 	if (ret)
 		goto out_dst_cgroup;
-	ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
+	ret = kvm_lock_all_vcpus(source_kvm);
 	if (ret)
 		goto out_dst_vcpu;
 
@@ -2107,9 +2043,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 	ret = 0;
 
 out_source_vcpu:
-	sev_unlock_vcpus_for_migration(source_kvm);
+	kvm_unlock_all_vcpus(source_kvm);
 out_dst_vcpu:
-	sev_unlock_vcpus_for_migration(kvm);
+	kvm_unlock_all_vcpus(kvm);
 out_dst_cgroup:
 	/* Operates on the source on success, on the destination on failure. */
 	if (charged)

0 commit comments

Comments
 (0)