arch/riscv/kvm/vcpu_sbi_hsm.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/wordpart.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

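/*
 * SBI HSM HART_START: bring the vCPU identified by the guest's a0 out of
 * the stopped state. The start address in a1 and the opaque value in a2
 * are staged into the target's reset context, so the target resumes at
 * sepc = a1 with its own hartid in a0 and the opaque value in a1.
 */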
static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *reset_cntx;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	struct kvm_vcpu *target_vcpu;
	unsigned long target_vcpuid = cp->a0;
	int ret = 0;

	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
	if (!target_vcpu)
		return SBI_ERR_INVALID_PARAM;

	spin_lock(&target_vcpu->arch.mp_state_lock);

	if (!kvm_riscv_vcpu_stopped(target_vcpu)) {
		ret = SBI_ERR_ALREADY_AVAILABLE;
		goto out;
	}

	spin_lock(&target_vcpu->arch.reset_cntx_lock);
	reset_cntx = &target_vcpu->arch.guest_reset_context;
	/* start address */
	reset_cntx->sepc = cp->a1;
	/* target vcpu id to start */
	reset_cntx->a0 = target_vcpuid;
	/* private data passed from kernel */
	reset_cntx->a1 = cp->a2;
	spin_unlock(&target_vcpu->arch.reset_cntx_lock);

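	/*
	 * Queue the reset request before powering the target on, so it
	 * reloads the staged context before it can run again. Both steps
	 * happen under mp_state_lock, serializing concurrent HART_STARTs
	 * against the same target.
	 */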
	kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);

	__kvm_riscv_vcpu_power_on(target_vcpu);

out:
	spin_unlock(&target_vcpu->arch.mp_state_lock);

	return ret;
}

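/*
 * SBI HSM HART_STOP: per the SBI spec a hart can only stop itself, so
 * this always operates on the calling vCPU. Stopping a hart that is
 * already stopped fails.
 */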
static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	if (kvm_riscv_vcpu_stopped(vcpu)) {
		ret = SBI_ERR_FAILURE;
		goto out;
	}

	__kvm_riscv_vcpu_power_off(vcpu);

out:
	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}

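/*
 * SBI HSM HART_GET_STATUS: look up the vCPU named by the guest's a0 and
 * return its SBI_HSM_STATE_* code, or a negative SBI error for an
 * invalid hart id.
 */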
static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long target_vcpuid = cp->a0;
	struct kvm_vcpu *target_vcpu;

	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
	if (!target_vcpu)
		return SBI_ERR_INVALID_PARAM;
	/*
	 * Check the target vCPU, not the caller: stopped wins, and a
	 * not-stopped vCPU that is blocked in WFI is reported as suspended.
	 */
	if (kvm_riscv_vcpu_stopped(target_vcpu))
		return SBI_HSM_STATE_STOPPED;
	else if (target_vcpu->stat.generic.blocking)
		return SBI_HSM_STATE_SUSPENDED;
	else
		return SBI_HSM_STATE_STARTED;
}

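/*
 * Top-level HSM dispatcher: the SBI function id arrives in a6 and the
 * SBI error code is passed back to the guest through retdata->err_val.
 * The handler's own return value is for KVM itself, not the guest, so
 * it is always 0 here.
 */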
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;

	switch (funcid) {
	case SBI_EXT_HSM_HART_START:
		ret = kvm_sbi_hsm_vcpu_start(vcpu);
		break;
	case SBI_EXT_HSM_HART_STOP:
		ret = kvm_sbi_hsm_vcpu_stop(vcpu);
		break;
	case SBI_EXT_HSM_HART_STATUS:
		ret = kvm_sbi_hsm_vcpu_get_status(vcpu);
		if (ret >= 0) {
			/* On success the HSM state is the SBI return value */
			retdata->out_val = ret;
			ret = 0;
		}
		break;
	case SBI_EXT_HSM_HART_SUSPEND:
		/* suspend_type is u32 per the SBI spec; ignore upper XLEN bits */
		switch (lower_32_bits(cp->a0)) {
		case SBI_HSM_SUSPEND_RET_DEFAULT:
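			/* Retentive suspend: context is preserved, so block as on WFI */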
			kvm_riscv_vcpu_wfi(vcpu);
			break;
		case SBI_HSM_SUSPEND_NON_RET_DEFAULT:
			ret = SBI_ERR_NOT_SUPPORTED;
			break;
		default:
			ret = SBI_ERR_INVALID_PARAM;
		}
		break;
	default:
		ret = SBI_ERR_NOT_SUPPORTED;
	}

	retdata->err_val = ret;

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm = {
	.extid_start = SBI_EXT_HSM,
	.extid_end = SBI_EXT_HSM,
	.handler = kvm_sbi_ext_hsm_handler,
};
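
/*
 * Guest-side sketch (illustrative, not part of this file): a guest kernel
 * would reach kvm_sbi_hsm_vcpu_start() above with an ecall along the
 * lines of
 *
 *	a7 = SBI_EXT_HSM (0x48534D), a6 = SBI_EXT_HSM_HART_START (0),
 *	a0 = target hartid, a1 = physical address of the entry point,
 *	a2 = opaque value (delivered to the new hart in its a1),
 *	ecall
 */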