/*
 * This file is auto-generated. Modifications will be lost.
 *
 * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
 * for more information.
 */
#ifndef _ASM_X86_KVM_H
#define _ASM_X86_KVM_H
#include <linux/const.h>
#include <linux/bits.h>
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/stddef.h>
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define AC_VECTOR 17
#define MC_VECTOR 18
#define XM_VECTOR 19
#define VE_VECTOR 20
#define __KVM_HAVE_PIT
#define __KVM_HAVE_IOAPIC
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
#define __KVM_HAVE_DEBUGREGS
#define __KVM_HAVE_XSAVE
#define __KVM_HAVE_XCRS
#define KVM_NR_INTERRUPTS 256
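/* Register state of the emulated i8259 PIC; read and written through the KVM_GET_IRQCHIP / KVM_SET_IRQCHIP vm ioctls (chip ids KVM_IRQCHIP_PIC_MASTER / KVM_IRQCHIP_PIC_SLAVE below). */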
struct kvm_pic_state {
  __u8 last_irr;
  __u8 irr;
  __u8 imr;
  __u8 isr;
  __u8 priority_add;
  __u8 irq_base;
  __u8 read_reg_select;
  __u8 poll;
  __u8 special_mask;
  __u8 init_state;
  __u8 auto_eoi;
  __u8 rotate_on_auto_eoi;
  __u8 special_fully_nested_mode;
  __u8 init4;
  __u8 elcr;
  __u8 elcr_mask;
};
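/* Register state of the emulated I/O APIC, also transferred via KVM_GET_IRQCHIP / KVM_SET_IRQCHIP (chip id KVM_IRQCHIP_IOAPIC). */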
#define KVM_IOAPIC_NUM_PINS 24
struct kvm_ioapic_state {
  __u64 base_address;
  __u32 ioregsel;
  __u32 id;
  __u32 irr;
  __u32 pad;
  union {
    __u64 bits;
    struct {
      __u8 vector;
      __u8 delivery_mode : 3;
      __u8 dest_mode : 1;
      __u8 delivery_status : 1;
      __u8 polarity : 1;
      __u8 remote_irr : 1;
      __u8 trig_mode : 1;
      __u8 mask : 1;
      __u8 reserve : 7;
      __u8 reserved[4];
      __u8 dest_id;
    } fields;
  } redirtbl[KVM_IOAPIC_NUM_PINS];
};
#define KVM_IRQCHIP_PIC_MASTER 0
#define KVM_IRQCHIP_PIC_SLAVE 1
#define KVM_IRQCHIP_IOAPIC 2
#define KVM_NR_IRQCHIPS 3
#define KVM_RUN_X86_SMM (1 << 0)
#define KVM_RUN_X86_BUS_LOCK (1 << 1)
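/* General-purpose register state, used with the KVM_GET_REGS / KVM_SET_REGS vcpu ioctls. */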
struct kvm_regs {
  __u64 rax, rbx, rcx, rdx;
  __u64 rsi, rdi, rsp, rbp;
  __u64 r8, r9, r10, r11;
  __u64 r12, r13, r14, r15;
  __u64 rip, rflags;
};
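/* Local APIC register page (1 KiB), used with the KVM_GET_LAPIC / KVM_SET_LAPIC vcpu ioctls. */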
#define KVM_APIC_REG_SIZE 0x400
struct kvm_lapic_state {
  char regs[KVM_APIC_REG_SIZE];
};
struct kvm_segment {
  __u64 base;
  __u32 limit;
  __u16 selector;
  __u8 type;
  __u8 present, dpl, db, s, l, g, avl;
  __u8 unusable;
  __u8 padding;
};
struct kvm_dtable {
  __u64 base;
  __u16 limit;
  __u16 padding[3];
};
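/* Segment, control and other system registers, used with the KVM_GET_SREGS / KVM_SET_SREGS vcpu ioctls. */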
struct kvm_sregs {
  struct kvm_segment cs, ds, es, fs, gs, ss;
  struct kvm_segment tr, ldt;
  struct kvm_dtable gdt, idt;
  __u64 cr0, cr2, cr3, cr4, cr8;
  __u64 efer;
  __u64 apic_base;
  __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};
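/* Extended variant of kvm_sregs that also carries the guest PDPTRs, used with the KVM_GET_SREGS2 / KVM_SET_SREGS2 vcpu ioctls. */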
struct kvm_sregs2 {
  struct kvm_segment cs, ds, es, fs, gs, ss;
  struct kvm_segment tr, ldt;
  struct kvm_dtable gdt, idt;
  __u64 cr0, cr2, cr3, cr4, cr8;
  __u64 efer;
  __u64 apic_base;
  __u64 flags;
  __u64 pdptrs[4];
};
#define KVM_SREGS2_FLAGS_PDPTRS_VALID 1
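/* Legacy x87/SSE state, used with the KVM_GET_FPU / KVM_SET_FPU vcpu ioctls. */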
struct kvm_fpu {
  __u8 fpr[8][16];
  __u16 fcw;
  __u16 fsw;
  __u8 ftwx;
  __u8 pad1;
  __u16 last_opcode;
  __u64 last_ip;
  __u64 last_dp;
  __u8 xmm[16][16];
  __u32 mxcsr;
  __u32 pad2;
};
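/* MSR access: kvm_msrs wraps an array of kvm_msr_entry for KVM_GET_MSRS / KVM_SET_MSRS, and kvm_msr_list is filled by KVM_GET_MSR_INDEX_LIST. */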
struct kvm_msr_entry {
  __u32 index;
  __u32 reserved;
  __u64 data;
};
struct kvm_msrs {
  __u32 nmsrs;
  __u32 pad;
  struct kvm_msr_entry entries[];
};
struct kvm_msr_list {
  __u32 nmsrs;
  __u32 indices[];
};
#define KVM_MSR_FILTER_MAX_BITMAP_SIZE 0x600
struct kvm_msr_filter_range {
#define KVM_MSR_FILTER_READ (1 << 0)
#define KVM_MSR_FILTER_WRITE (1 << 1)
#define KVM_MSR_FILTER_RANGE_VALID_MASK (KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)
  __u32 flags;
  __u32 nmsrs;
  __u32 base;
  __u8 * bitmap;
};
#define KVM_MSR_FILTER_MAX_RANGES 16
struct kvm_msr_filter {
#define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0)
#define KVM_MSR_FILTER_DEFAULT_DENY (1 << 0)
#define KVM_MSR_FILTER_VALID_MASK (KVM_MSR_FILTER_DEFAULT_DENY)
  __u32 flags;
  struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES];
};
struct kvm_cpuid_entry {
  __u32 function;
  __u32 eax;
  __u32 ebx;
  __u32 ecx;
  __u32 edx;
  __u32 padding;
};
struct kvm_cpuid {
  __u32 nent;
  __u32 padding;
  struct kvm_cpuid_entry entries[];
};
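/* CPUID leaves reported to the guest; kvm_cpuid2 is used with KVM_SET_CPUID2 and KVM_GET_SUPPORTED_CPUID. */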
struct kvm_cpuid_entry2 {
  __u32 function;
  __u32 index;
  __u32 flags;
  __u32 eax;
  __u32 ebx;
  __u32 ecx;
  __u32 edx;
  __u32 padding[3];
};
#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX (1 << 0)
#define KVM_CPUID_FLAG_STATEFUL_FUNC (1 << 1)
#define KVM_CPUID_FLAG_STATE_READ_NEXT (1 << 2)
struct kvm_cpuid2 {
  __u32 nent;
  __u32 padding;
  struct kvm_cpuid_entry2 entries[];
};
struct kvm_pit_channel_state {
  __u32 count;
  __u16 latched_count;
  __u8 count_latched;
  __u8 status_latched;
  __u8 status;
  __u8 read_state;
  __u8 write_state;
  __u8 write_latch;
  __u8 rw_mode;
  __u8 mode;
  __u8 bcd;
  __u8 gate;
  __s64 count_load_time;
};
struct kvm_debug_exit_arch {
  __u32 exception;
  __u32 pad;
  __u64 pc;
  __u64 dr6;
  __u64 dr7;
};
#define KVM_GUESTDBG_USE_SW_BP 0x00010000
#define KVM_GUESTDBG_USE_HW_BP 0x00020000
#define KVM_GUESTDBG_INJECT_DB 0x00040000
#define KVM_GUESTDBG_INJECT_BP 0x00080000
#define KVM_GUESTDBG_BLOCKIRQ 0x00100000
struct kvm_guest_debug_arch {
  __u64 debugreg[8];
};
struct kvm_pit_state {
  struct kvm_pit_channel_state channels[3];
};
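/* In-kernel i8254 PIT state, used with the KVM_GET_PIT2 / KVM_SET_PIT2 vm ioctls. */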
#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
#define KVM_PIT_FLAGS_SPEAKER_DATA_ON 0x00000002
struct kvm_pit_state2 {
  struct kvm_pit_channel_state channels[3];
  __u32 flags;
  __u32 reserved[9];
};
struct kvm_reinject_control {
  __u8 pit_reinject;
  __u8 reserved[31];
};
#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
#define KVM_VCPUEVENT_VALID_SMM 0x00000008
#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010
#define KVM_VCPUEVENT_VALID_TRIPLE_FAULT 0x00000020
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
#define KVM_X86_SHADOW_INT_STI 0x02
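/* Pending exception, interrupt, NMI and SMI state, used with the KVM_GET_VCPU_EVENTS / KVM_SET_VCPU_EVENTS vcpu ioctls. */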
struct kvm_vcpu_events {
  struct {
    __u8 injected;
    __u8 nr;
    __u8 has_error_code;
    __u8 pending;
    __u32 error_code;
  } exception;
  struct {
    __u8 injected;
    __u8 nr;
    __u8 soft;
    __u8 shadow;
  } interrupt;
  struct {
    __u8 injected;
    __u8 pending;
    __u8 masked;
    __u8 pad;
  } nmi;
  __u32 sipi_vector;
  __u32 flags;
  struct {
    __u8 smm;
    __u8 pending;
    __u8 smm_inside_nmi;
    __u8 latched_init;
  } smi;
  struct {
    __u8 pending;
  } triple_fault;
  __u8 reserved[26];
  __u8 exception_has_payload;
  __u64 exception_payload;
};
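/* Debug register state (DR0-DR3, DR6, DR7), used with the KVM_GET_DEBUGREGS / KVM_SET_DEBUGREGS vcpu ioctls. */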
struct kvm_debugregs {
  __u64 db[4];
  __u64 dr6;
  __u64 dr7;
  __u64 flags;
  __u64 reserved[9];
};
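/* XSAVE area contents, used with the KVM_GET_XSAVE / KVM_SET_XSAVE (and KVM_GET_XSAVE2) vcpu ioctls. */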
struct kvm_xsave {
  __u32 region[1024];
  __u32 extra[];
};
#define KVM_MAX_XCRS 16
struct kvm_xcr {
  __u32 xcr;
  __u32 reserved;
  __u64 value;
};
struct kvm_xcrs {
  __u32 nr_xcrs;
  __u32 flags;
  struct kvm_xcr xcrs[KVM_MAX_XCRS];
  __u64 padding[16];
};
#define KVM_SYNC_X86_REGS (1UL << 0)
#define KVM_SYNC_X86_SREGS (1UL << 1)
#define KVM_SYNC_X86_EVENTS (1UL << 2)
#define KVM_SYNC_X86_VALID_FIELDS (KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS | KVM_SYNC_X86_EVENTS)
struct kvm_sync_regs {
  struct kvm_regs regs;
  struct kvm_sregs sregs;
  struct kvm_vcpu_events events;
};
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS 0x00000004
#define KVM_STATE_NESTED_MTF_PENDING 0x00000008
#define KVM_STATE_NESTED_GIF_SET 0x00000100
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000
#define KVM_STATE_NESTED_SVM_VMCB_SIZE 0x1000
#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
#define KVM_X86_XCOMP_GUEST_SUPP 0
struct kvm_vmx_nested_state_data {
  __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
  __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
};
struct kvm_vmx_nested_state_hdr {
  __u64 vmxon_pa;
  __u64 vmcs12_pa;
  struct {
    __u16 flags;
  } smm;
  __u16 pad;
  __u32 flags;
  __u64 preemption_timer_deadline;
};
struct kvm_svm_nested_state_data {
  __u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE];
};
struct kvm_svm_nested_state_hdr {
  __u64 vmcb_pa;
};
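/* Nested virtualization (VMX or SVM) state, used with the KVM_GET_NESTED_STATE / KVM_SET_NESTED_STATE vcpu ioctls. */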
struct kvm_nested_state {
  __u16 flags;
  __u16 format;
  __u32 size;
  union {
    struct kvm_vmx_nested_state_hdr vmx;
    struct kvm_svm_nested_state_hdr svm;
    __u8 pad[120];
  } hdr;
  union {
    __DECLARE_FLEX_ARRAY(struct kvm_vmx_nested_state_data, vmx);
    __DECLARE_FLEX_ARRAY(struct kvm_svm_nested_state_data, svm);
  } data;
};
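/* PMU event allow/deny filter, installed with the KVM_SET_PMU_EVENT_FILTER vm ioctl. */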
struct kvm_pmu_event_filter {
  __u32 action;
  __u32 nevents;
  __u32 fixed_counter_bitmap;
  __u32 flags;
  __u32 pad[4];
  __u64 events[];
};
#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1
#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS _BITUL(0)
#define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS)
struct kvm_x86_mce {
  __u64 status;
  __u64 addr;
  __u64 misc;
  __u64 mcg_status;
  __u8 bank;
  __u8 pad1[7];
  __u64 pad2[3];
};
#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA (1 << 8)
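/* Xen hypercall page configuration, passed to the KVM_XEN_HVM_CONFIG vm ioctl. */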
struct kvm_xen_hvm_config {
  __u32 flags;
  __u32 msr;
  __u64 blob_addr_32;
  __u64 blob_addr_64;
  __u8 blob_size_32;
  __u8 blob_size_64;
  __u8 pad2[30];
};
struct kvm_xen_hvm_attr {
  __u16 type;
  __u16 pad[3];
  union {
    __u8 long_mode;
    __u8 vector;
    __u8 runstate_update_flag;
    union {
      __u64 gfn;
#define KVM_XEN_INVALID_GFN ((__u64) - 1)
      __u64 hva;
    } shared_info;
    struct {
      __u32 send_port;
      __u32 type;
      __u32 flags;
#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0)
#define KVM_XEN_EVTCHN_UPDATE (1 << 1)
#define KVM_XEN_EVTCHN_RESET (1 << 2)
      union {
        struct {
          __u32 port;
          __u32 vcpu;
          __u32 priority;
        } port;
        struct {
          __u32 port;
          __s32 fd;
        } eventfd;
        __u32 padding[4];
      } deliver;
    } evtchn;
    __u32 xen_version;
    __u64 pad[8];
  } u;
};
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3
#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4
#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG 0x5
#define KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA 0x6
struct kvm_xen_vcpu_attr {
  __u16 type;
  __u16 pad[3];
  union {
    __u64 gpa;
#define KVM_XEN_INVALID_GPA ((__u64) - 1)
    __u64 hva;
    __u64 pad[8];
    struct {
      __u64 state;
      __u64 state_entry_time;
      __u64 time_running;
      __u64 time_runnable;
      __u64 time_blocked;
      __u64 time_offline;
    } runstate;
    __u32 vcpu_id;
    struct {
      __u32 port;
      __u32 priority;
      __u64 expires_ns;
    } timer;
    __u8 vector;
  } u;
};
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6
#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7
#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA 0x9
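/* SEV command identifiers, issued through the KVM_MEMORY_ENCRYPT_OP vm ioctl via struct kvm_sev_cmd below. */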
enum sev_cmd_id {
  KVM_SEV_INIT = 0,
  KVM_SEV_ES_INIT,
  KVM_SEV_LAUNCH_START,
  KVM_SEV_LAUNCH_UPDATE_DATA,
  KVM_SEV_LAUNCH_UPDATE_VMSA,
  KVM_SEV_LAUNCH_SECRET,
  KVM_SEV_LAUNCH_MEASURE,
  KVM_SEV_LAUNCH_FINISH,
  KVM_SEV_SEND_START,
  KVM_SEV_SEND_UPDATE_DATA,
  KVM_SEV_SEND_UPDATE_VMSA,
  KVM_SEV_SEND_FINISH,
  KVM_SEV_RECEIVE_START,
  KVM_SEV_RECEIVE_UPDATE_DATA,
  KVM_SEV_RECEIVE_UPDATE_VMSA,
  KVM_SEV_RECEIVE_FINISH,
  KVM_SEV_GUEST_STATUS,
  KVM_SEV_DBG_DECRYPT,
  KVM_SEV_DBG_ENCRYPT,
  KVM_SEV_CERT_EXPORT,
  KVM_SEV_GET_ATTESTATION_REPORT,
  KVM_SEV_SEND_CANCEL,
  KVM_SEV_NR_MAX,
};
struct kvm_sev_cmd {
  __u32 id;
  __u32 pad0;
  __u64 data;
  __u32 error;
  __u32 sev_fd;
};
struct kvm_sev_launch_start {
  __u32 handle;
  __u32 policy;
  __u64 dh_uaddr;
  __u32 dh_len;
  __u32 pad0;
  __u64 session_uaddr;
  __u32 session_len;
  __u32 pad1;
};
struct kvm_sev_launch_update_data {
  __u64 uaddr;
  __u32 len;
  __u32 pad0;
};
struct kvm_sev_launch_secret {
  __u64 hdr_uaddr;
  __u32 hdr_len;
  __u32 pad0;
  __u64 guest_uaddr;
  __u32 guest_len;
  __u32 pad1;
  __u64 trans_uaddr;
  __u32 trans_len;
  __u32 pad2;
};
struct kvm_sev_launch_measure {
  __u64 uaddr;
  __u32 len;
  __u32 pad0;
};
struct kvm_sev_guest_status {
  __u32 handle;
  __u32 policy;
  __u32 state;
};
struct kvm_sev_dbg {
  __u64 src_uaddr;
  __u64 dst_uaddr;
  __u32 len;
  __u32 pad0;
};
struct kvm_sev_attestation_report {
  __u8 mnonce[16];
  __u64 uaddr;
  __u32 len;
  __u32 pad0;
};
struct kvm_sev_send_start {
  __u32 policy;
  __u32 pad0;
  __u64 pdh_cert_uaddr;
  __u32 pdh_cert_len;
  __u32 pad1;
  __u64 plat_certs_uaddr;
  __u32 plat_certs_len;
  __u32 pad2;
  __u64 amd_certs_uaddr;
  __u32 amd_certs_len;
  __u32 pad3;
  __u64 session_uaddr;
  __u32 session_len;
  __u32 pad4;
};
struct kvm_sev_send_update_data {
  __u64 hdr_uaddr;
  __u32 hdr_len;
  __u32 pad0;
  __u64 guest_uaddr;
  __u32 guest_len;
  __u32 pad1;
  __u64 trans_uaddr;
  __u32 trans_len;
  __u32 pad2;
};
struct kvm_sev_receive_start {
  __u32 handle;
  __u32 policy;
  __u64 pdh_uaddr;
  __u32 pdh_len;
  __u32 pad0;
  __u64 session_uaddr;
  __u32 session_len;
  __u32 pad1;
};
struct kvm_sev_receive_update_data {
  __u64 hdr_uaddr;
  __u32 hdr_len;
  __u32 pad0;
  __u64 guest_uaddr;
  __u32 guest_len;
  __u32 pad1;
  __u64 trans_uaddr;
  __u32 trans_len;
  __u32 pad2;
};
#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
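/* Binds an eventfd to a Hyper-V HvSignalEvent connection id, used with the KVM_HYPERV_EVENTFD vm ioctl. */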
struct kvm_hyperv_eventfd {
  __u32 conn_id;
  __s32 fd;
  __u32 flags;
  __u32 padding[3];
};
#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
#define KVM_PMU_ENCODE_MASKED_ENTRY(event_select,mask,match,exclude) (((event_select) & 0xFFULL) | (((event_select) & 0XF00ULL) << 24) | (((mask) & 0xFFULL) << 56) | (((match) & 0xFFULL) << 8) | ((__u64) (! ! (exclude)) << 55))
#define KVM_PMU_MASKED_ENTRY_EVENT_SELECT (__GENMASK_ULL(7, 0) | __GENMASK_ULL(35, 32))
#define KVM_PMU_MASKED_ENTRY_UMASK_MASK (__GENMASK_ULL(63, 56))
#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH (__GENMASK_ULL(15, 8))
#define KVM_PMU_MASKED_ENTRY_EXCLUDE (_BITULL(55))
#define KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT (56)
#define KVM_VCPU_TSC_CTRL 0
#define KVM_VCPU_TSC_OFFSET 0
#define KVM_EXIT_HYPERCALL_LONG_MODE _BITULL(0)
#define KVM_X86_DEFAULT_VM 0
#define KVM_X86_SW_PROTECTED_VM 1
#endif