/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__

/*
 * Forward declarations keep this header lightweight; users that need the
 * full definitions include <linux/kvm_host.h>.
 */
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
struct kvm_one_reg;
struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
struct kvm_memslots;

enum kvm_mr_change;

#include <linux/bits.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/spinlock_types.h>

#include <asm/kvm_types.h>

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */
typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef u64            gfn_t;

#define GPA_INVALID	(~(gpa_t)0)

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef u64            hfn_t;

typedef hfn_t kvm_pfn_t;
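
/*
 * Illustrative sketch, not part of this header: a gfn is simply the
 * page-granular view of a gpa.  The gpa_to_gfn()/gfn_to_gpa() helpers in
 * <linux/kvm_host.h> are little more than these shifts:
 *
 *	static inline gfn_t gpa_to_gfn(gpa_t gpa)
 *	{
 *		return (gfn_t)(gpa >> PAGE_SHIFT);
 *	}
 *
 *	static inline gpa_t gfn_to_gpa(gfn_t gfn)
 *	{
 *		return (gpa_t)gfn << PAGE_SHIFT;
 *	}
 */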

/*
 * Flags describing who consumes a cached pfn: the guest (KVM maintains a
 * guest-visible mapping of the page on its behalf), the host (KVM accesses
 * the page through the khva kernel mapping), or both.
 */
enum pfn_cache_usage {
	KVM_GUEST_USES_PFN = BIT(0),
	KVM_HOST_USES_PFN = BIT(1),
	KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
};

/*
 * Cached translation of a gpa range to its hva, validated against the
 * memslot generation so the mapping is re-resolved after memslot changes.
 */
struct gfn_to_hva_cache {
	u64 generation;			/* memslots generation number */
	gpa_t gpa;
	unsigned long hva;
	unsigned long len;
	struct kvm_memory_slot *memslot;
};
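
/*
 * Illustrative usage sketch (an assumption about a typical call site, not a
 * definition from this header): the cache is primed once, then reused for
 * repeated accesses to the same guest buffer.  The helpers are declared in
 * <linux/kvm_host.h>:
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val;
 *
 *	kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val));
 *	...
 *	kvm_read_guest_cached(kvm, &ghc, &val, sizeof(val));
 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */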

/*
 * Cached translation of a gpa to its pfn, including a kernel mapping
 * (@khva) of the backing page.  The cache is invalidated via MMU notifiers
 * and the memslot generation; readers take @lock, while updaters additionally
 * serialize against one another via @refresh_lock.
 */
struct gfn_to_pfn_cache {
	u64 generation;
	gpa_t gpa;
	unsigned long uhva;			/* userspace address backing @gpa */
	struct kvm_memory_slot *memslot;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	struct list_head list;			/* link in kvm->gpc_list */
	rwlock_t lock;
	struct mutex refresh_lock;
	void *khva;				/* kernel mapping of @pfn */
	kvm_pfn_t pfn;
	enum pfn_cache_usage usage;
	bool active;
	bool valid;
};
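
/*
 * Illustrative lifetime sketch.  NOTE: the helper names below are an
 * assumption based on the kvm_host.h of this era; they have been renamed
 * across kernel versions, so treat this as a sketch of the check/refresh
 * pattern rather than a stable API reference:
 *
 *	kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu, KVM_HOST_USES_PFN, gpa, len);
 *	...
 *	read_lock(&gpc->lock);
 *	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, len)) {
 *		read_unlock(&gpc->lock);
 *		kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, len);
 *		read_lock(&gpc->lock);
 *	}
 *	... access guest memory through gpc->khva ...
 *	read_unlock(&gpc->lock);
 *	...
 *	kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
 */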

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * Memory caches are used to preallocate memory ahead of various MMU flows,
 * e.g. page fault handlers.  Gracefully handling allocation failures deep in
 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
 * holding MMU locks.  Note, these caches act more like prefetch buffers than
 * classical caches, i.e. objects are not returned to the cache on being freed.
 *
 * The @capacity field and @objects array are lazily initialized when the cache
 * is topped up (__kvm_mmu_topup_memory_cache()).
 */
struct kvm_mmu_memory_cache {
	int nobjs;			/* number of objects currently cached */
	gfp_t gfp_zero;			/* __GFP_ZERO if objects must be zeroed */
	gfp_t gfp_custom;		/* if set, used instead of GFP_KERNEL_ACCOUNT on topup */
	struct kmem_cache *kmem_cache;	/* backing slab; NULL => page allocator */
	int capacity;
	void **objects;
};
#endif
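
/*
 * Illustrative usage sketch (an assumption about a typical arch call site;
 * mmu_page_cache below is the x86 instance): fill the cache with GFP_KERNEL
 * allocations before taking mmu_lock, then draw objects from it with the
 * lock held.  The helpers are declared in <linux/kvm_host.h>:
 *
 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, min_objs);
 *	if (r)
 *		return r;
 *	...
 *	write_lock(&vcpu->kvm->mmu_lock);
 *	pt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 *	...
 *	write_unlock(&vcpu->kvm->mmu_lock);
 */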

#define HALT_POLL_HIST_COUNT			32

/* Architecture-independent, VM-wide stats. */
struct kvm_vm_stat_generic {
	u64 remote_tlb_flush;
	u64 remote_tlb_flush_requests;
};

/*
 * Architecture-independent per-vCPU stats.  The *_hist arrays are
 * logarithmic histograms of poll/wait times, in nanoseconds.
 */
struct kvm_vcpu_stat_generic {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_wait_ns;
	u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
	u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
	u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
	u64 blocking;
};

#define KVM_STATS_NAME_SIZE	48

#endif /* __KVM_TYPES_H__ */