author     Linus Torvalds <torvalds@linux-foundation.org>   2020-08-03 17:38:43 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-08-03 17:38:43 -0700
commit     5183a617ecbf01805c4abb33c3165a276eec7234 (patch)
tree       984900caecdb82acd1291d10ff444dec5a90da0c /arch/x86/platform/uv
parent     e96ec8cf9ca12a8d6b3b896a2eccd4b92a1893ab (diff)
parent     3bcf25a40b018e632d70bb866d75746748953fbc (diff)
Merge tag 'x86-platform-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 platform updates from Ingo Molnar:
 "The biggest change is the removal of SGI UV1 support, which allowed
  the removal of the legacy EFI old_mmap code as well.

  This removes quite a bunch of old code & quirks"

* tag 'x86-platform-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/efi: Remove unused EFI_UV1_MEMMAP code
  x86/platform/uv: Remove uv bios and efi code related to EFI_UV1_MEMMAP
  x86/efi: Remove references to no-longer-used efi_have_uv1_memmap()
  x86/efi: Delete SGI UV1 detection.
  x86/platform/uv: Remove efi=old_map command line option
  x86/platform/uv: Remove vestigial mention of UV1 platform from bios header
  x86/platform/uv: Remove support for UV1 platform from uv
  x86/platform/uv: Remove support for uv1 platform from uv_hub
  x86/platform/uv: Remove support for UV1 platform from uv_bau
  x86/platform/uv: Remove support for UV1 platform from uv_mmrs
  x86/platform/uv: Remove support for UV1 platform from x2apic_uv_x
  x86/platform/uv: Remove support for UV1 platform from uv_tlb
  x86/platform/uv: Remove support for UV1 platform from uv_time
Diffstat (limited to 'arch/x86/platform/uv')
-rw-r--r--   arch/x86/platform/uv/bios_uv.c    173
-rw-r--r--   arch/x86/platform/uv/tlb_uv.c     243
-rw-r--r--   arch/x86/platform/uv/uv_time.c     16
3 files changed, 39 insertions(+), 393 deletions(-)
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 4494589a288a..a6e5f2c1805d 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -30,17 +30,7 @@ static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
*/
return BIOS_STATUS_UNIMPLEMENTED;
- /*
- * If EFI_UV1_MEMMAP is set, we need to fall back to using our old EFI
- * callback method, which uses efi_call() directly, with the kernel page tables:
- */
- if (unlikely(efi_enabled(EFI_UV1_MEMMAP))) {
- kernel_fpu_begin();
- ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
- kernel_fpu_end();
- } else {
- ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
- }
+ ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
return ret;
}
@@ -209,164 +199,3 @@ int uv_bios_init(void)
pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
return 0;
}
-
-static void __init early_code_mapping_set_exec(int executable)
-{
- efi_memory_desc_t *md;
-
- if (!(__supported_pte_mask & _PAGE_NX))
- return;
-
- /* Make EFI service code area executable */
- for_each_efi_memory_desc(md) {
- if (md->type == EFI_RUNTIME_SERVICES_CODE ||
- md->type == EFI_BOOT_SERVICES_CODE)
- efi_set_executable(md, executable);
- }
-}
-
-void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd)
-{
- /*
- * After the lock is released, the original page table is restored.
- */
- int pgd_idx, i;
- int nr_pgds;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
-
- nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
-
- for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
- pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
- set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
-
- if (!pgd_present(*pgd))
- continue;
-
- for (i = 0; i < PTRS_PER_P4D; i++) {
- p4d = p4d_offset(pgd,
- pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
-
- if (!p4d_present(*p4d))
- continue;
-
- pud = (pud_t *)p4d_page_vaddr(*p4d);
- pud_free(&init_mm, pud);
- }
-
- p4d = (p4d_t *)pgd_page_vaddr(*pgd);
- p4d_free(&init_mm, p4d);
- }
-
- kfree(save_pgd);
-
- __flush_tlb_all();
- early_code_mapping_set_exec(0);
-}
-
-pgd_t * __init efi_uv1_memmap_phys_prolog(void)
-{
- unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
- pgd_t *save_pgd, *pgd_k, *pgd_efi;
- p4d_t *p4d, *p4d_k, *p4d_efi;
- pud_t *pud;
-
- int pgd;
- int n_pgds, i, j;
-
- early_code_mapping_set_exec(1);
-
- n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
- save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
- if (!save_pgd)
- return NULL;
-
- /*
- * Build 1:1 identity mapping for UV1 memmap usage. Note that
- * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
- * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical
- * address X, the pud_index(X) != pud_index(__va(X)), we can only copy
- * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping.
- * This means here we can only reuse the PMD tables of the direct mapping.
- */
- for (pgd = 0; pgd < n_pgds; pgd++) {
- addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
- vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
- pgd_efi = pgd_offset_k(addr_pgd);
- save_pgd[pgd] = *pgd_efi;
-
- p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
- if (!p4d) {
- pr_err("Failed to allocate p4d table!\n");
- goto out;
- }
-
- for (i = 0; i < PTRS_PER_P4D; i++) {
- addr_p4d = addr_pgd + i * P4D_SIZE;
- p4d_efi = p4d + p4d_index(addr_p4d);
-
- pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
- if (!pud) {
- pr_err("Failed to allocate pud table!\n");
- goto out;
- }
-
- for (j = 0; j < PTRS_PER_PUD; j++) {
- addr_pud = addr_p4d + j * PUD_SIZE;
-
- if (addr_pud > (max_pfn << PAGE_SHIFT))
- break;
-
- vaddr = (unsigned long)__va(addr_pud);
-
- pgd_k = pgd_offset_k(vaddr);
- p4d_k = p4d_offset(pgd_k, vaddr);
- pud[j] = *pud_offset(p4d_k, vaddr);
- }
- }
- pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
- }
-
- __flush_tlb_all();
- return save_pgd;
-out:
- efi_uv1_memmap_phys_epilog(save_pgd);
- return NULL;
-}
-
-void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
- u32 type, u64 attribute)
-{
- unsigned long last_map_pfn;
-
- if (type == EFI_MEMORY_MAPPED_IO)
- return ioremap(phys_addr, size);
-
- last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size,
- PAGE_KERNEL);
- if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
- unsigned long top = last_map_pfn << PAGE_SHIFT;
- efi_ioremap(top, size - (top - phys_addr), type, attribute);
- }
-
- if (!(attribute & EFI_MEMORY_WB))
- efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
-
- return (void __iomem *)__va(phys_addr);
-}
-
-static int __init arch_parse_efi_cmdline(char *str)
-{
- if (!str) {
- pr_warn("need at least one option\n");
- return -EINVAL;
- }
-
- if (!efi_is_mixed() && parse_option_str(str, "old_map"))
- set_bit(EFI_UV1_MEMMAP, &efi.flags);
-
- return 0;
-}
-early_param("efi", arch_parse_efi_cmdline);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 0ac96ca304c7..62ea907668f8 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -23,18 +23,6 @@
static struct bau_operations ops __ro_after_init;
-/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
-static const int timeout_base_ns[] = {
- 20,
- 160,
- 1280,
- 10240,
- 81920,
- 655360,
- 5242880,
- 167772160
-};
-
static int timeout_us;
static bool nobau = true;
static int nobau_perm;
@@ -510,70 +498,6 @@ static inline void end_uvhub_quiesce(struct bau_control *hmaster)
atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}
-static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
-{
- unsigned long descriptor_status;
-
- descriptor_status = uv_read_local_mmr(mmr_offset);
- descriptor_status >>= right_shift;
- descriptor_status &= UV_ACT_STATUS_MASK;
- return descriptor_status;
-}
-
-/*
- * Wait for completion of a broadcast software ack message
- * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
- */
-static int uv1_wait_completion(struct bau_desc *bau_desc,
- struct bau_control *bcp, long try)
-{
- unsigned long descriptor_status;
- cycles_t ttm;
- u64 mmr_offset = bcp->status_mmr;
- int right_shift = bcp->status_index;
- struct ptc_stats *stat = bcp->statp;
-
- descriptor_status = uv1_read_status(mmr_offset, right_shift);
- /* spin on the status MMR, waiting for it to go idle */
- while ((descriptor_status != DS_IDLE)) {
- /*
- * Our software ack messages may be blocked because
- * there are no swack resources available. As long
- * as none of them has timed out hardware will NACK
- * our message and its state will stay IDLE.
- */
- if (descriptor_status == DS_SOURCE_TIMEOUT) {
- stat->s_stimeout++;
- return FLUSH_GIVEUP;
- } else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
- stat->s_dtimeout++;
- ttm = get_cycles();
-
- /*
- * Our retries may be blocked by all destination
- * swack resources being consumed, and a timeout
- * pending. In that case hardware returns the
- * ERROR that looks like a destination timeout.
- */
- if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
- bcp->conseccompletes = 0;
- return FLUSH_RETRY_PLUGGED;
- }
-
- bcp->conseccompletes = 0;
- return FLUSH_RETRY_TIMEOUT;
- } else {
- /*
- * descriptor_status is still BUSY
- */
- cpu_relax();
- }
- descriptor_status = uv1_read_status(mmr_offset, right_shift);
- }
- bcp->conseccompletes++;
- return FLUSH_COMPLETE;
-}
-
/*
* UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
* But not currently used.
@@ -853,24 +777,6 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
}
/*
- * Because of a uv1 hardware bug only a limited number of concurrent
- * requests can be made.
- */
-static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
-{
- spinlock_t *lock = &hmaster->uvhub_lock;
- atomic_t *v;
-
- v = &hmaster->active_descriptor_count;
- if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
- stat->s_throttles++;
- do {
- cpu_relax();
- } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
- }
-}
-
-/*
* Handle the completion status of a message send.
*/
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
@@ -899,50 +805,30 @@ static int uv_flush_send_and_wait(struct cpumask *flush_mask,
{
int seq_number = 0;
int completion_stat = 0;
- int uv1 = 0;
long try = 0;
unsigned long index;
cycles_t time1;
cycles_t time2;
struct ptc_stats *stat = bcp->statp;
struct bau_control *hmaster = bcp->uvhub_master;
- struct uv1_bau_msg_header *uv1_hdr = NULL;
struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;
- if (bcp->uvhub_version == UV_BAU_V1) {
- uv1 = 1;
- uv1_throttle(hmaster, stat);
- }
-
while (hmaster->uvhub_quiesce)
cpu_relax();
time1 = get_cycles();
- if (uv1)
- uv1_hdr = &bau_desc->header.uv1_hdr;
- else
- /* uv2 and uv3 */
- uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
+ uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
do {
if (try == 0) {
- if (uv1)
- uv1_hdr->msg_type = MSG_REGULAR;
- else
- uv2_3_hdr->msg_type = MSG_REGULAR;
+ uv2_3_hdr->msg_type = MSG_REGULAR;
seq_number = bcp->message_number++;
} else {
- if (uv1)
- uv1_hdr->msg_type = MSG_RETRY;
- else
- uv2_3_hdr->msg_type = MSG_RETRY;
+ uv2_3_hdr->msg_type = MSG_RETRY;
stat->s_retry_messages++;
}
- if (uv1)
- uv1_hdr->sequence = seq_number;
- else
- uv2_3_hdr->sequence = seq_number;
+ uv2_3_hdr->sequence = seq_number;
index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
bcp->send_message = get_cycles();
@@ -1162,11 +1048,10 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
address = TLB_FLUSH_ALL;
switch (bcp->uvhub_version) {
- case UV_BAU_V1:
case UV_BAU_V2:
case UV_BAU_V3:
- bau_desc->payload.uv1_2_3.address = address;
- bau_desc->payload.uv1_2_3.sending_cpu = cpu;
+ bau_desc->payload.uv2_3.address = address;
+ bau_desc->payload.uv2_3.sending_cpu = cpu;
break;
case UV_BAU_V4:
bau_desc->payload.uv4.address = address;
@@ -1300,7 +1185,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_uv_bau_message)
if (bcp->uvhub_version == UV_BAU_V2)
process_uv2_message(&msgdesc, bcp);
else
- /* no error workaround for uv1 or uv3 */
+ /* no error workaround for uv3 */
bau_process_message(&msgdesc, bcp, 1);
msg++;
@@ -1350,12 +1235,7 @@ static void __init enable_timeouts(void)
mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
write_mmr_misc_control(pnode, mmr_image);
- /*
- * UV1:
- * Subsequent reversals of the timebase bit (3) cause an
- * immediate timeout of one or all INTD resources as
- * indicated in bits 2:0 (7 causes all of them to timeout).
- */
+
mmr_image |= (1L << SOFTACK_MSHIFT);
if (is_uv2_hub()) {
/* do not touch the legacy mode bit */
@@ -1711,14 +1591,12 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
int i;
int cpu;
- int uv1 = 0;
unsigned long gpa;
unsigned long m;
unsigned long n;
size_t dsize;
struct bau_desc *bau_desc;
struct bau_desc *bd2;
- struct uv1_bau_msg_header *uv1_hdr;
struct uv2_3_bau_msg_header *uv2_3_hdr;
struct bau_control *bcp;
@@ -1733,8 +1611,6 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
gpa = uv_gpa(bau_desc);
n = uv_gpa_to_gnode(gpa);
m = ops.bau_gpa_to_offset(gpa);
- if (is_uv1_hub())
- uv1 = 1;
/* the 14-bit pnode */
write_mmr_descriptor_base(pnode,
@@ -1746,37 +1622,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
*/
for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
memset(bd2, 0, sizeof(struct bau_desc));
- if (uv1) {
- uv1_hdr = &bd2->header.uv1_hdr;
- uv1_hdr->swack_flag = 1;
- /*
- * The base_dest_nasid set in the message header
- * is the nasid of the first uvhub in the partition.
- * The bit map will indicate destination pnode numbers
- * relative to that base. They may not be consecutive
- * if nasid striding is being used.
- */
- uv1_hdr->base_dest_nasid =
- UV_PNODE_TO_NASID(base_pnode);
- uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
- uv1_hdr->command = UV_NET_ENDPOINT_INTD;
- uv1_hdr->int_both = 1;
- /*
- * all others need to be set to zero:
- * fairness chaining multilevel count replied_to
- */
- } else {
- /*
- * BIOS uses legacy mode, but uv2 and uv3 hardware always
- * uses native mode for selective broadcasts.
- */
- uv2_3_hdr = &bd2->header.uv2_3_hdr;
- uv2_3_hdr->swack_flag = 1;
- uv2_3_hdr->base_dest_nasid =
- UV_PNODE_TO_NASID(base_pnode);
- uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
- uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
- }
+ /*
+ * BIOS uses legacy mode, but uv2 and uv3 hardware always
+ * uses native mode for selective broadcasts.
+ */
+ uv2_3_hdr = &bd2->header.uv2_3_hdr;
+ uv2_3_hdr->swack_flag = 1;
+ uv2_3_hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
+ uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
+ uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
}
for_each_present_cpu(cpu) {
if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
@@ -1861,7 +1715,7 @@ static void __init init_uvhub(int uvhub, int vector, int base_pnode)
* The below initialization can't be in firmware because the
* messaging IRQ will be determined by the OS.
*/
- apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
+ apicid = uvhub_to_first_apicid(uvhub);
write_mmr_data_config(pnode, ((apicid << 32) | vector));
}
@@ -1874,33 +1728,20 @@ static int calculate_destination_timeout(void)
{
unsigned long mmr_image;
int mult1;
- int mult2;
- int index;
int base;
int ret;
- unsigned long ts_ns;
-
- if (is_uv1_hub()) {
- mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
- mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
- index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
- mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
- mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
- ts_ns = timeout_base_ns[index];
- ts_ns *= (mult1 * mult2);
- ret = ts_ns / 1000;
- } else {
- /* same destination timeout for uv2 and uv3 */
- /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
- mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
- mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
- if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
- base = 80;
- else
- base = 10;
- mult1 = mmr_image & UV2_ACK_MASK;
- ret = mult1 * base;
- }
+
+ /* same destination timeout for uv2 and uv3 */
+ /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
+ mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
+ mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
+ if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
+ base = 80;
+ else
+ base = 10;
+ mult1 = mmr_image & UV2_ACK_MASK;
+ ret = mult1 * base;
+
return ret;
}
@@ -2039,9 +1880,7 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
bcp->cpus_in_socket = sdp->num_cpus;
bcp->socket_master = *smasterp;
bcp->uvhub = bdp->uvhub;
- if (is_uv1_hub())
- bcp->uvhub_version = UV_BAU_V1;
- else if (is_uv2_hub())
+ if (is_uv2_hub())
bcp->uvhub_version = UV_BAU_V2;
else if (is_uv3_hub())
bcp->uvhub_version = UV_BAU_V3;
@@ -2123,7 +1962,7 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
struct uvhub_desc *uvhub_descs;
unsigned char *uvhub_mask = NULL;
- if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
+ if (is_uv3_hub() || is_uv2_hub())
timeout_us = calculate_destination_timeout();
uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
@@ -2151,17 +1990,6 @@ fail:
return 1;
}
-static const struct bau_operations uv1_bau_ops __initconst = {
- .bau_gpa_to_offset = uv_gpa_to_offset,
- .read_l_sw_ack = read_mmr_sw_ack,
- .read_g_sw_ack = read_gmmr_sw_ack,
- .write_l_sw_ack = write_mmr_sw_ack,
- .write_g_sw_ack = write_gmmr_sw_ack,
- .write_payload_first = write_mmr_payload_first,
- .write_payload_last = write_mmr_payload_last,
- .wait_completion = uv1_wait_completion,
-};
-
static const struct bau_operations uv2_3_bau_ops __initconst = {
.bau_gpa_to_offset = uv_gpa_to_offset,
.read_l_sw_ack = read_mmr_sw_ack,
@@ -2206,8 +2034,6 @@ static int __init uv_bau_init(void)
ops = uv2_3_bau_ops;
else if (is_uv2_hub())
ops = uv2_3_bau_ops;
- else if (is_uv1_hub())
- ops = uv1_bau_ops;
nuvhubs = uv_num_possible_blades();
if (nuvhubs < 2) {
@@ -2228,7 +2054,7 @@ static int __init uv_bau_init(void)
}
/* software timeouts are not supported on UV4 */
- if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
+ if (is_uv3_hub() || is_uv2_hub())
enable_timeouts();
if (init_per_cpu(nuvhubs, uv_base_pnode)) {
@@ -2251,8 +2077,7 @@ static int __init uv_bau_init(void)
val = 1L << 63;
write_gmmr_activation(pnode, val);
mmr = 1; /* should be 1 to broadcast to both sockets */
- if (!is_uv1_hub())
- write_mmr_data_broadcast(pnode, mmr);
+ write_mmr_data_broadcast(pnode, mmr);
}
}
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 7af31b245636..f82a1337a608 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -74,7 +74,6 @@ static void uv_rtc_send_IPI(int cpu)
apicid = cpu_physical_id(cpu);
pnode = uv_apicid_to_pnode(apicid);
- apicid |= uv_apicid_hibits;
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
(apicid << UVH_IPI_INT_APIC_ID_SHFT) |
(X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
@@ -85,10 +84,7 @@ static void uv_rtc_send_IPI(int cpu)
/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
- if (is_uv1_hub())
- return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
- UV1H_EVENT_OCCURRED0_RTC1_MASK;
- else if (is_uvx_hub())
+ if (is_uvx_hub())
return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
UVXH_EVENT_OCCURRED2_RTC_1_MASK;
return 0;
@@ -98,19 +94,15 @@ static int uv_intr_pending(int pnode)
static int uv_setup_intr(int cpu, u64 expires)
{
u64 val;
- unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits;
+ unsigned long apicid = cpu_physical_id(cpu);
int pnode = uv_cpu_to_pnode(cpu);
uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
UVH_RTC1_INT_CONFIG_M_MASK);
uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
- if (is_uv1_hub())
- uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
- UV1H_EVENT_OCCURRED0_RTC1_MASK);
- else
- uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
- UVXH_EVENT_OCCURRED2_RTC_1_MASK);
+ uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
+ UVXH_EVENT_OCCURRED2_RTC_1_MASK);
val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);