author     Linus Torvalds <torvalds@linux-foundation.org>  2023-09-04 11:26:29 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-09-04 11:26:29 -0700
commit     0b90c5637dfea8a08f87db5dd16000eb679013a3
tree       1e64c72282c411c563758a26d34cfe023a56668a  /drivers/hv
parent     e4f1b8202fb59c56a3de7642d50326923670513f
parent     284930a0146ade1ce0250a1d3bae7a675af4bb3b
Merge tag 'hyperv-next-signed-20230902' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux
Pull hyperv updates from Wei Liu:
- Support for SEV-SNP guests on Hyper-V (Tianyu Lan)
- Support for TDX guests on Hyper-V (Dexuan Cui)
- Use SBRM API in Hyper-V balloon driver (Mitchell Levy)
- Avoid dereferencing ACPI root object handle in VMBus driver (Maciej
Szmigiero)
- A few miscellaneous fixes (Jiapeng Chong, Nathan Chancellor, Saurabh
Sengar)
* tag 'hyperv-next-signed-20230902' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux: (24 commits)
x86/hyperv: Remove duplicate include
x86/hyperv: Move the code in ivm.c around to avoid unnecessary ifdef's
x86/hyperv: Remove hv_isolation_type_en_snp
x86/hyperv: Use TDX GHCI to access some MSRs in a TDX VM with the paravisor
Drivers: hv: vmbus: Bring the post_msg_page back for TDX VMs with the paravisor
x86/hyperv: Introduce a global variable hyperv_paravisor_present
Drivers: hv: vmbus: Support >64 VPs for a fully enlightened TDX/SNP VM
x86/hyperv: Fix serial console interrupts for fully enlightened TDX guests
Drivers: hv: vmbus: Support fully enlightened TDX guests
x86/hyperv: Support hypercalls for fully enlightened TDX guests
x86/hyperv: Add hv_isolation_type_tdx() to detect TDX guests
x86/hyperv: Fix undefined reference to isolation_type_en_snp without CONFIG_HYPERV
x86/hyperv: Add missing 'inline' to hv_snp_boot_ap() stub
hv: hyperv.h: Replace one-element array with flexible-array member
Drivers: hv: vmbus: Don't dereference ACPI root object handle
x86/hyperv: Add hyperv-specific handling for VMMCALL under SEV-ES
x86/hyperv: Add smp support for SEV-SNP guest
clocksource: hyper-v: Mark hyperv tsc page unencrypted in sev-snp enlightened guest
x86/hyperv: Use vmmcall to implement Hyper-V hypercall in sev-snp enlightened guest
drivers: hv: Mark percpu hvcall input arg page unencrypted in SEV-SNP enlightened guest
...
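Of the commits above, "Use SBRM API in Hyper-V balloon driver" converts hv_balloon.c from open-coded spin_lock_irqsave()/spin_unlock_irqrestore() pairs to the scope-based resource management helpers from <linux/cleanup.h>, as visible in the diff below. A minimal sketch of the two helpers it uses, guard() and scoped_guard(), follows; my_lock and my_count are hypothetical names for illustration, not taken from the driver.

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* hypothetical example lock */
static unsigned long my_count;		/* hypothetical protected state */

static void update_count(unsigned long delta)
{
	/* The lock is dropped automatically when the scope ends. */
	scoped_guard(spinlock_irqsave, &my_lock) {
		my_count += delta;
	}
}

static unsigned long read_count(void)
{
	/* guard() holds the lock until the end of the function. */
	guard(spinlock_irqsave)(&my_lock);
	return my_count;
}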
Diffstat (limited to 'drivers/hv')
-rw-r--r--  drivers/hv/connection.c   |  16
-rw-r--r--  drivers/hv/hv.c           | 131
-rw-r--r--  drivers/hv/hv_balloon.c   |  82
-rw-r--r--  drivers/hv/hv_common.c    |  48
-rw-r--r--  drivers/hv/hyperv_vmbus.h |  11
-rw-r--r--  drivers/hv/vmbus_drv.c    |   3
6 files changed, 225 insertions, 66 deletions
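One pattern recurs throughout the hv.c and hv_common.c hunks below: a page to be shared with the hypervisor is allocated, marked decrypted with set_memory_decrypted(), then re-zeroed, because its prior contents read back as ciphertext once the mapping is shared; if the conversion fails, the page is deliberately leaked rather than freed, since its encryption state is indeterminate. A condensed sketch of the allocation side; alloc_shared_page() is a hypothetical helper, not part of the patch.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/string.h>

/* Hypothetical helper mirroring the alloc-side pattern in hv_synic_alloc(). */
static void *alloc_shared_page(void)
{
	void *page = (void *)get_zeroed_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	if (set_memory_decrypted((unsigned long)page, 1)) {
		/* Don't free: after a failed conversion the page state is unknown. */
		return NULL;
	}

	/* Re-zero: the now-shared view of the page starts out as ciphertext. */
	memset(page, 0, PAGE_SIZE);
	return page;
}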
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index ebf15f31d97e..3cabeeabb1ca 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -98,6 +98,7 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
 	 */
 	if (version >= VERSION_WIN10_V5) {
 		msg->msg_sint = VMBUS_MESSAGE_SINT;
+		msg->msg_vtl = ms_hyperv.vtl;
 		vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID_4;
 	} else {
 		msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
@@ -482,10 +483,17 @@ void vmbus_set_event(struct vmbus_channel *channel)
 
 	++channel->sig_events;
 
-	if (hv_isolation_type_snp())
-		hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
-				NULL, sizeof(channel->sig_event));
-	else
+	if (ms_hyperv.paravisor_present) {
+		if (hv_isolation_type_snp())
+			hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
+					  NULL, sizeof(channel->sig_event));
+		else if (hv_isolation_type_tdx())
+			hv_tdx_hypercall(HVCALL_SIGNAL_EVENT | HV_HYPERCALL_FAST_BIT,
+					 channel->sig_event, 0);
+		else
+			WARN_ON_ONCE(1);
+	} else {
 		hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
+	}
 }
 EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index de6708dbe0df..51e5018ac9b2 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -20,6 +20,7 @@
 #include <linux/interrupt.h>
 #include <clocksource/hyperv_timer.h>
 #include <asm/mshyperv.h>
+#include <linux/set_memory.h>
 #include "hyperv_vmbus.h"
 
 /* The one and only */
@@ -56,20 +57,37 @@ int hv_post_message(union hv_connection_id connection_id,
 
 	local_irq_save(flags);
 
-	aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
+	/*
+	 * A TDX VM with the paravisor must use the decrypted post_msg_page: see
+	 * the comment in struct hv_per_cpu_context. A SNP VM with the paravisor
+	 * can use the encrypted hyperv_pcpu_input_arg because it copies the
+	 * input into the GHCB page, which has been decrypted by the paravisor.
+	 */
+	if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present)
+		aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
+	else
+		aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
 	aligned_msg->connectionid = connection_id;
 	aligned_msg->reserved = 0;
 	aligned_msg->message_type = message_type;
 	aligned_msg->payload_size = payload_size;
 	memcpy((void *)aligned_msg->payload, payload, payload_size);
 
-	if (hv_isolation_type_snp())
-		status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
-				(void *)aligned_msg, NULL,
-				sizeof(*aligned_msg));
-	else
+	if (ms_hyperv.paravisor_present) {
+		if (hv_isolation_type_tdx())
+			status = hv_tdx_hypercall(HVCALL_POST_MESSAGE,
+						  virt_to_phys(aligned_msg), 0);
+		else if (hv_isolation_type_snp())
+			status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
+						   aligned_msg, NULL,
+						   sizeof(*aligned_msg));
+		else
+			status = HV_STATUS_INVALID_PARAMETER;
+	} else {
 		status = hv_do_hypercall(HVCALL_POST_MESSAGE,
 					 aligned_msg, NULL);
+	}
 
 	local_irq_restore(flags);
 
@@ -78,7 +96,7 @@ int hv_post_message(union hv_connection_id connection_id,
 
 int hv_synic_alloc(void)
 {
-	int cpu;
+	int cpu, ret = -ENOMEM;
 	struct hv_per_cpu_context *hv_cpu;
 
 	/*
@@ -104,11 +122,29 @@ int hv_synic_alloc(void)
 		tasklet_init(&hv_cpu->msg_dpc,
 			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);
 
+		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
+			hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
+			if (hv_cpu->post_msg_page == NULL) {
+				pr_err("Unable to allocate post msg page\n");
+				goto err;
+			}
+
+			ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);
+			if (ret) {
+				pr_err("Failed to decrypt post msg page: %d\n", ret);
+				/* Just leak the page, as it's unsafe to free the page. */
+				hv_cpu->post_msg_page = NULL;
+				goto err;
+			}
+
+			memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
+		}
+
 		/*
 		 * Synic message and event pages are allocated by paravisor.
 		 * Skip these pages allocation here.
 		 */
-		if (!hv_isolation_type_snp() && !hv_root_partition) {
+		if (!ms_hyperv.paravisor_present && !hv_root_partition) {
 			hv_cpu->synic_message_page =
 				(void *)get_zeroed_page(GFP_ATOMIC);
 			if (hv_cpu->synic_message_page == NULL) {
@@ -120,29 +156,96 @@ int hv_synic_alloc(void)
 				(void *)get_zeroed_page(GFP_ATOMIC);
 			if (hv_cpu->synic_event_page == NULL) {
 				pr_err("Unable to allocate SYNIC event page\n");
+
+				free_page((unsigned long)hv_cpu->synic_message_page);
+				hv_cpu->synic_message_page = NULL;
 				goto err;
 			}
 		}
+
+		if (!ms_hyperv.paravisor_present &&
+		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
+			ret = set_memory_decrypted((unsigned long)
+				hv_cpu->synic_message_page, 1);
+			if (ret) {
+				pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
+				hv_cpu->synic_message_page = NULL;
+
+				/*
+				 * Free the event page here so that hv_synic_free()
+				 * won't later try to re-encrypt it.
+				 */
+				free_page((unsigned long)hv_cpu->synic_event_page);
+				hv_cpu->synic_event_page = NULL;
+				goto err;
+			}
+
+			ret = set_memory_decrypted((unsigned long)
+				hv_cpu->synic_event_page, 1);
+			if (ret) {
+				pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
+				hv_cpu->synic_event_page = NULL;
+				goto err;
+			}
+
+			memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
+			memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
+		}
 	}
 
 	return 0;
+
 err:
 	/*
 	 * Any memory allocations that succeeded will be freed when
 	 * the caller cleans up by calling hv_synic_free()
 	 */
-	return -ENOMEM;
+	return ret;
 }
 
 void hv_synic_free(void)
 {
-	int cpu;
+	int cpu, ret;
 
 	for_each_present_cpu(cpu) {
 		struct hv_per_cpu_context *hv_cpu
 			= per_cpu_ptr(hv_context.cpu_context, cpu);
 
+		/* It's better to leak the page if the encryption fails. */
+		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
+			if (hv_cpu->post_msg_page) {
+				ret = set_memory_encrypted((unsigned long)
+					hv_cpu->post_msg_page, 1);
+				if (ret) {
+					pr_err("Failed to encrypt post msg page: %d\n", ret);
+					hv_cpu->post_msg_page = NULL;
+				}
+			}
+		}
+
+		if (!ms_hyperv.paravisor_present &&
+		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
+			if (hv_cpu->synic_message_page) {
+				ret = set_memory_encrypted((unsigned long)
+					hv_cpu->synic_message_page, 1);
+				if (ret) {
+					pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
+					hv_cpu->synic_message_page = NULL;
+				}
+			}
+
+			if (hv_cpu->synic_event_page) {
+				ret = set_memory_encrypted((unsigned long)
+					hv_cpu->synic_event_page, 1);
+				if (ret) {
+					pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
+					hv_cpu->synic_event_page = NULL;
+				}
+			}
+		}
+
+		free_page((unsigned long)hv_cpu->post_msg_page);
 		free_page((unsigned long)hv_cpu->synic_event_page);
 		free_page((unsigned long)hv_cpu->synic_message_page);
 	}
@@ -170,7 +273,7 @@ void hv_synic_enable_regs(unsigned int cpu)
 	simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
 	simp.simp_enabled = 1;
 
-	if (hv_isolation_type_snp() || hv_root_partition) {
+	if (ms_hyperv.paravisor_present || hv_root_partition) {
 		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
 		u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
 				~ms_hyperv.shared_gpa_boundary;
@@ -189,7 +292,7 @@ void hv_synic_enable_regs(unsigned int cpu)
 	siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
 	siefp.siefp_enabled = 1;
 
-	if (hv_isolation_type_snp() || hv_root_partition) {
+	if (ms_hyperv.paravisor_present || hv_root_partition) {
 		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
 		u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
 				~ms_hyperv.shared_gpa_boundary;
@@ -272,7 +375,7 @@ void hv_synic_disable_regs(unsigned int cpu)
 	 * addresses.
 	 */
 	simp.simp_enabled = 0;
-	if (hv_isolation_type_snp() || hv_root_partition) {
+	if (ms_hyperv.paravisor_present || hv_root_partition) {
 		iounmap(hv_cpu->synic_message_page);
 		hv_cpu->synic_message_page = NULL;
 	} else {
@@ -284,7 +387,7 @@ void hv_synic_disable_regs(unsigned int cpu)
 	siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
 	siefp.siefp_enabled = 0;
 
-	if (hv_isolation_type_snp() || hv_root_partition) {
+	if (ms_hyperv.paravisor_present || hv_root_partition) {
 		iounmap(hv_cpu->synic_event_page);
 		hv_cpu->synic_event_page = NULL;
 	} else {
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 0d7a3ba66396..e000fa3b9f97 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -8,6 +8,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cleanup.h>
 #include <linux/kernel.h>
 #include <linux/jiffies.h>
 #include <linux/mman.h>
@@ -646,7 +647,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
 			       void *v)
 {
 	struct memory_notify *mem = (struct memory_notify *)v;
-	unsigned long flags, pfn_count;
+	unsigned long pfn_count;
 
 	switch (val) {
 	case MEM_ONLINE:
@@ -655,21 +656,22 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
 		break;
 
 	case MEM_OFFLINE:
-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		pfn_count = hv_page_offline_check(mem->start_pfn,
-						  mem->nr_pages);
-		if (pfn_count <= dm_device.num_pages_onlined) {
-			dm_device.num_pages_onlined -= pfn_count;
-		} else {
-			/*
-			 * We're offlining more pages than we managed to online.
-			 * This is unexpected. In any case don't let
-			 * num_pages_onlined wrap around zero.
-			 */
-			WARN_ON_ONCE(1);
-			dm_device.num_pages_onlined = 0;
+		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+			pfn_count = hv_page_offline_check(mem->start_pfn,
+							  mem->nr_pages);
+			if (pfn_count <= dm_device.num_pages_onlined) {
+				dm_device.num_pages_onlined -= pfn_count;
+			} else {
+				/*
+				 * We're offlining more pages than we
+				 * managed to online. This is
+				 * unexpected. In any case don't let
+				 * num_pages_onlined wrap around zero.
+				 */
+				WARN_ON_ONCE(1);
+				dm_device.num_pages_onlined = 0;
+			}
 		}
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 		break;
 	case MEM_GOING_ONLINE:
 	case MEM_GOING_OFFLINE:
@@ -721,24 +723,23 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 	unsigned long start_pfn;
 	unsigned long processed_pfn;
 	unsigned long total_pfn = pfn_count;
-	unsigned long flags;
 
 	for (i = 0; i < (size/HA_CHUNK); i++) {
 		start_pfn = start + (i * HA_CHUNK);
 
-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		has->ha_end_pfn += HA_CHUNK;
+		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+			has->ha_end_pfn += HA_CHUNK;
 
-		if (total_pfn > HA_CHUNK) {
-			processed_pfn = HA_CHUNK;
-			total_pfn -= HA_CHUNK;
-		} else {
-			processed_pfn = total_pfn;
-			total_pfn = 0;
-		}
+			if (total_pfn > HA_CHUNK) {
+				processed_pfn = HA_CHUNK;
+				total_pfn -= HA_CHUNK;
+			} else {
+				processed_pfn = total_pfn;
+				total_pfn = 0;
+			}
 
-		has->covered_end_pfn += processed_pfn;
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+			has->covered_end_pfn += processed_pfn;
+		}
 
 		reinit_completion(&dm_device.ol_waitevent);
@@ -758,10 +759,10 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 			 */
 			do_hot_add = false;
 		}
-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		has->ha_end_pfn -= HA_CHUNK;
-		has->covered_end_pfn -= processed_pfn;
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+			has->ha_end_pfn -= HA_CHUNK;
+			has->covered_end_pfn -= processed_pfn;
+		}
 		break;
 	}
 
@@ -781,10 +782,9 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 static void hv_online_page(struct page *pg, unsigned int order)
 {
 	struct hv_hotadd_state *has;
-	unsigned long flags;
 	unsigned long pfn = page_to_pfn(pg);
 
-	spin_lock_irqsave(&dm_device.ha_lock, flags);
+	guard(spinlock_irqsave)(&dm_device.ha_lock);
 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
 		/* The page belongs to a different HAS. */
 		if ((pfn < has->start_pfn) ||
@@ -794,7 +794,6 @@ static void hv_online_page(struct page *pg, unsigned int order)
 		hv_bring_pgs_online(has, pfn, 1UL << order);
 		break;
 	}
-	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 }
 
 static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
@@ -803,9 +802,8 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 	struct hv_hotadd_gap *gap;
 	unsigned long residual, new_inc;
 	int ret = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dm_device.ha_lock, flags);
+	guard(spinlock_irqsave)(&dm_device.ha_lock);
 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
 		/*
 		 * If the pfn range we are dealing with is not in the current
@@ -852,7 +850,6 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 		ret = 1;
 		break;
 	}
-	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 
 	return ret;
 }
@@ -947,7 +944,6 @@ static unsigned long process_hot_add(unsigned long pg_start,
 {
 	struct hv_hotadd_state *ha_region = NULL;
 	int covered;
-	unsigned long flags;
 
 	if (pfn_cnt == 0)
 		return 0;
@@ -979,9 +975,9 @@ static unsigned long process_hot_add(unsigned long pg_start,
 		ha_region->covered_end_pfn = pg_start;
 		ha_region->end_pfn = rg_start + rg_size;
 
-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+			list_add_tail(&ha_region->list, &dm_device.ha_region_list);
+		}
 	}
 
 do_pg_range:
@@ -2047,7 +2043,6 @@ static void balloon_remove(struct hv_device *dev)
 	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
 	struct hv_hotadd_state *has, *tmp;
 	struct hv_hotadd_gap *gap, *tmp_gap;
-	unsigned long flags;
 
 	if (dm->num_pages_ballooned != 0)
 		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
@@ -2073,7 +2068,7 @@ static void balloon_remove(struct hv_device *dev)
 #endif
 	}
 
-	spin_lock_irqsave(&dm_device.ha_lock, flags);
+	guard(spinlock_irqsave)(&dm_device.ha_lock);
 	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
 		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
 			list_del(&gap->list);
@@ -2082,7 +2077,6 @@ static void balloon_remove(struct hv_device *dev)
 		list_del(&has->list);
 		kfree(has);
 	}
-	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 }
 
 static int balloon_suspend(struct hv_device *hv_dev)
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 6a2258fef1fe..ccad7bca3fd3 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -24,6 +24,7 @@
 #include <linux/kmsg_dump.h>
 #include <linux/slab.h>
 #include <linux/dma-map-ops.h>
+#include <linux/set_memory.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
@@ -359,6 +360,8 @@ int hv_common_cpu_init(unsigned int cpu)
 	u64 msr_vp_index;
 	gfp_t flags;
 	int pgcount = hv_root_partition ? 2 : 1;
+	void *mem;
+	int ret;
 
 	/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
 	flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;
@@ -370,14 +373,41 @@ int hv_common_cpu_init(unsigned int cpu)
 	 * allocated if this CPU was previously online and then taken offline
 	 */
 	if (!*inputarg) {
-		*inputarg = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
-		if (!(*inputarg))
+		mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
+		if (!mem)
 			return -ENOMEM;
 
 		if (hv_root_partition) {
 			outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
-			*outputarg = (char *)(*inputarg) + HV_HYP_PAGE_SIZE;
+			*outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
+		}
+
+		if (!ms_hyperv.paravisor_present &&
+		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
+			ret = set_memory_decrypted((unsigned long)mem, pgcount);
+			if (ret) {
+				/* It may be unsafe to free 'mem' */
+				return ret;
+			}
+
+			memset(mem, 0x00, pgcount * HV_HYP_PAGE_SIZE);
 		}
+
+		/*
+		 * In a fully enlightened TDX/SNP VM with more than 64 VPs, if
+		 * hyperv_pcpu_input_arg is not NULL, set_memory_decrypted() ->
+		 * ... -> cpa_flush() -> ... -> __send_ipi_mask_ex() tries to
+		 * use hyperv_pcpu_input_arg as the hypercall input page, which
+		 * must be a decrypted page in such a VM, but the page is still
+		 * encrypted before set_memory_decrypted() returns. Fix this by
+		 * setting *inputarg after the above set_memory_decrypted(): if
+		 * hyperv_pcpu_input_arg is NULL, __send_ipi_mask_ex() returns
+		 * HV_STATUS_INVALID_PARAMETER immediately, and the function
+		 * hv_send_ipi_mask() falls back to orig_apic.send_IPI_mask(),
+		 * which may be slightly slower than the hypercall, but still
+		 * works correctly in such a VM.
+		 */
+		*inputarg = mem;
 	}
 
 	msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
@@ -502,6 +532,12 @@ bool __weak hv_isolation_type_snp(void)
 }
 EXPORT_SYMBOL_GPL(hv_isolation_type_snp);
 
+bool __weak hv_isolation_type_tdx(void)
+{
+	return false;
+}
+EXPORT_SYMBOL_GPL(hv_isolation_type_tdx);
+
 void __weak hv_setup_vmbus_handler(void (*handler)(void))
 {
 }
@@ -542,3 +578,9 @@ u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
 	return HV_STATUS_INVALID_PARAMETER;
 }
 EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);
+
+u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
+{
+	return HV_STATUS_INVALID_PARAMETER;
+}
+EXPORT_SYMBOL_GPL(hv_tdx_hypercall);
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 55f2086841ae..f6b1e710f805 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -124,6 +124,17 @@ struct hv_per_cpu_context {
 	void *synic_event_page;
 
 	/*
+	 * The page is only used in hv_post_message() for a TDX VM (with the
+	 * paravisor) to post a message to Hyper-V: when such a VM calls
+	 * HVCALL_POST_MESSAGE, it can't use the hyperv_pcpu_input_arg (which
+	 * is encrypted in such a VM) as the hypercall input page, because
+	 * the input page for HVCALL_POST_MESSAGE must be decrypted in such a
+	 * VM, so post_msg_page (which is decrypted in hv_synic_alloc()) is
+	 * introduced for this purpose. See hyperv_init() for more comments.
+	 */
+	void *post_msg_page;
+
+	/*
 	 * Starting with win8, we can take channel interrupts on any CPU;
 	 * we will manage the tasklet that handles events messages on a per CPU
 	 * basis.
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 67f95a29aeca..edbb38f6956b 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2287,7 +2287,8 @@ static int vmbus_acpi_add(struct platform_device *pdev)
 	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
 	 * firmware) is the VMOD that has the mmio ranges. Get that.
 	 */
-	for (ancestor = acpi_dev_parent(device); ancestor;
+	for (ancestor = acpi_dev_parent(device);
+	     ancestor && ancestor->handle != ACPI_ROOT_OBJECT;
 	     ancestor = acpi_dev_parent(ancestor)) {
 		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
 					     vmbus_walk_resources, NULL);
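The net effect of the connection.c and hv.c changes is one dispatch shape for hypercalls: when a paravisor is present, route through the isolation-specific mechanism (TDX GHCI or SNP GHCB); otherwise issue the plain hypercall. A condensed sketch of that shape; hv_post_message_dispatch() is a hypothetical wrapper, and the real call sites inline this logic.

/* Hypothetical wrapper condensing the dispatch used by the call sites above. */
static u64 hv_post_message_dispatch(void *input, u32 input_size)
{
	if (ms_hyperv.paravisor_present) {
		if (hv_isolation_type_tdx())
			/* TDX with paravisor: pass the GPA of the input page. */
			return hv_tdx_hypercall(HVCALL_POST_MESSAGE,
						virt_to_phys(input), 0);
		if (hv_isolation_type_snp())
			/* SNP with paravisor: input is copied into the GHCB page. */
			return hv_ghcb_hypercall(HVCALL_POST_MESSAGE, input,
						 NULL, input_size);
		return HV_STATUS_INVALID_PARAMETER;
	}
	/* No paravisor: the regular hypercall path works directly. */
	return hv_do_hypercall(HVCALL_POST_MESSAGE, input, NULL);
}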