Merge tag 'mips-fixes_6.18_1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS fixes from Thomas Bogendoerfer:

 - Fix CPU type in DT for econet

 - Fix Malta PCI MMIO breakage on the SOC-it system controller

 - Fix TLB shutdown caused by initial uniquification

 - Fix random seg faults due to missed vdso storage requirement

* tag 'mips-fixes_6.18_1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:
  MIPS: kernel: Fix random segmentation faults
  MIPS: mm: Prevent a TLB shutdown on initial uniquification
  mips: dts: econet: fix EN751221 core type
  MIPS: Malta: Fix !EVA SOC-it PCI MMIO
Linus Torvalds
2025-11-22 12:55:18 -08:00
4 changed files with 79 additions and 47 deletions


@@ -18,7 +18,7 @@
 		cpu@0 {
 			device_type = "cpu";
-			compatible = "mips,mips24KEc";
+			compatible = "mips,mips34Kc";
 			reg = <0>;
 		};
 	};


@@ -692,7 +692,7 @@ unsigned long mips_stack_top(void)
 	/* Space for the VDSO, data page & GIC user page */
 	if (current->thread.abi) {
 		top -= PAGE_ALIGN(current->thread.abi->vdso->size);
-		top -= PAGE_SIZE;
+		top -= VDSO_NR_PAGES * PAGE_SIZE;
 		top -= mips_gic_present() ? PAGE_SIZE : 0;
 
 		/* Space to randomize the VDSO base */
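
The change above reserves room below the stack top for every vDSO data page rather than a single page. A minimal userspace sketch of that reservation arithmetic follows; PAGE_SZ and VDSO_DATA_PAGES are illustrative stand-ins for the kernel's PAGE_SIZE and VDSO_NR_PAGES, and vdso_size stands in for current->thread.abi->vdso->size.

/* Illustrative only: stand-in constants, not the kernel's symbols. */
#include <stdio.h>

#define PAGE_SZ          4096UL
#define VDSO_DATA_PAGES  4UL    /* assumed value standing in for VDSO_NR_PAGES */

static unsigned long page_align(unsigned long x)
{
        return (x + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
}

/* Mirror of the fixed computation: vDSO image + all data pages + GIC page. */
static unsigned long reserve_below(unsigned long top, unsigned long vdso_size,
                                   int gic_present)
{
        top -= page_align(vdso_size);           /* vDSO code pages */
        top -= VDSO_DATA_PAGES * PAGE_SZ;       /* vDSO data pages (was one page) */
        if (gic_present)
                top -= PAGE_SZ;                 /* GIC user page */
        return top;
}

int main(void)
{
        printf("stack top after reservation: %#lx\n",
               reserve_below(0x7ffff000UL, 2 * PAGE_SZ, 1));
        return 0;
}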


@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/export.h>
+#include <linux/sort.h>
 
 #include <asm/cpu.h>
 #include <asm/cpu-type.h>
@@ -508,54 +509,78 @@ static int __init set_ntlb(char *str)
 __setup("ntlb=", set_ntlb);
 
-/* Initialise all TLB entries with unique values */
+/* Comparison function for EntryHi VPN fields. */
+static int r4k_vpn_cmp(const void *a, const void *b)
+{
+	long v = *(unsigned long *)a - *(unsigned long *)b;
+	int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1 : 0;
+
+	return s ? (v != 0) | v >> s : v;
+}
+
+/*
+ * Initialise all TLB entries with unique values that do not clash with
+ * what we have been handed over and what we'll be using ourselves.
+ */
 static void r4k_tlb_uniquify(void)
 {
-	int entry = num_wired_entries();
+	unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
+	int tlbsize = current_cpu_data.tlbsize;
+	int start = num_wired_entries();
+	unsigned long vpn_mask;
+	int cnt, ent, idx, i;
+
+	vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+	vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
 
 	htw_stop();
+
+	for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+		unsigned long vpn;
+
+		write_c0_index(i);
+		mtc0_tlbr_hazard();
+		tlb_read();
+		tlb_read_hazard();
+		vpn = read_c0_entryhi();
+		vpn &= vpn_mask & PAGE_MASK;
+		tlb_vpns[cnt] = vpn;
+
+		/* Prevent any large pages from overlapping regular ones. */
+		write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+		mtc0_tlbw_hazard();
+		tlb_write_indexed();
+		tlbw_use_hazard();
+	}
+
+	sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
+
+	write_c0_pagemask(PM_DEFAULT_MASK);
 	write_c0_entrylo0(0);
 	write_c0_entrylo1(0);
 
-	while (entry < current_cpu_data.tlbsize) {
-		unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
-		unsigned long asid = 0;
-		int idx;
-
-		/* Skip wired MMID to make ginvt_mmid work */
-		if (cpu_has_mmid)
-			asid = MMID_KERNEL_WIRED + 1;
-
-		/* Check for match before using UNIQUE_ENTRYHI */
-		do {
-			if (cpu_has_mmid) {
-				write_c0_memorymapid(asid);
-				write_c0_entryhi(UNIQUE_ENTRYHI(entry));
-			} else {
-				write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid);
-			}
-			mtc0_tlbw_hazard();
-			tlb_probe();
-			tlb_probe_hazard();
-			idx = read_c0_index();
-			/* No match or match is on current entry */
-			if (idx < 0 || idx == entry)
-				break;
-			/*
-			 * If we hit a match, we need to try again with
-			 * a different ASID.
-			 */
-			asid++;
-		} while (asid < asid_mask);
-
-		if (idx >= 0 && idx != entry)
-			panic("Unable to uniquify TLB entry %d", idx);
-
-		write_c0_index(entry);
-		mtc0_tlbw_hazard();
-		tlb_write_indexed();
-		entry++;
-	}
+	idx = 0;
+	ent = tlbsize;
+	for (i = start; i < tlbsize; i++)
+		while (1) {
+			unsigned long entryhi, vpn;
+
+			entryhi = UNIQUE_ENTRYHI(ent);
+			vpn = entryhi & vpn_mask & PAGE_MASK;
+
+			if (idx >= cnt || vpn < tlb_vpns[idx]) {
+				write_c0_entryhi(entryhi);
+				write_c0_index(i);
+				mtc0_tlbw_hazard();
+				tlb_write_indexed();
+				ent++;
+				break;
+			} else if (vpn == tlb_vpns[idx]) {
+				ent++;
+			} else {
+				idx++;
+			}
+		}
 
 	tlbw_use_hazard();
 	htw_start();
@@ -602,6 +627,7 @@ static void r4k_tlb_configure(void)
 	/* From this point on the ARC firmware is dead.	*/
 	r4k_tlb_uniquify();
+	local_flush_tlb_all();
 
 	/* Did I tell you that ARC SUCKS?			*/
 }
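
The reworked r4k_tlb_uniquify() above records the VPNs already present in the TLB, shrinks any large pages so they cannot overlap regular ones, sorts the recorded VPNs, and then rewrites every non-wired entry with the next candidate VPN that is not in the recorded set, so a write never duplicates a VPN that is still live in the TLB. The standalone sketch below illustrates just that sort-and-skip selection, with plain integers in place of EntryHi values and a toy unique_vpn() standing in for UNIQUE_ENTRYHI(); it is an illustration, not kernel code.

#include <stdio.h>
#include <stdlib.h>

static int cmp_ulong(const void *a, const void *b)
{
        unsigned long x = *(const unsigned long *)a;
        unsigned long y = *(const unsigned long *)b;

        return (x > y) - (x < y);
}

/* Toy stand-in for UNIQUE_ENTRYHI(): one candidate VPN per counter value. */
static unsigned long unique_vpn(int ent)
{
        return 0x1000UL + ent;
}

int main(void)
{
        /* VPNs assumed to be sitting in the TLB already (e.g. from boot firmware). */
        unsigned long present[] = { 0x100a, 0x1008, 0x100d };
        int cnt = sizeof(present) / sizeof(present[0]);
        int tlbsize = 8, start = 0;
        int idx = 0, ent = tlbsize, i;

        qsort(present, cnt, sizeof(present[0]), cmp_ulong);

        for (i = start; i < tlbsize; i++) {
                for (;;) {
                        unsigned long vpn = unique_vpn(ent);

                        if (idx >= cnt || vpn < present[idx]) {
                                /* Candidate is free: this is what gets written to entry i. */
                                printf("entry %d -> VPN %#lx\n", i, vpn);
                                ent++;
                                break;
                        } else if (vpn == present[idx]) {
                                ent++;  /* candidate collides with a recorded VPN, skip it */
                        } else {
                                idx++;  /* step past recorded VPNs below the candidate */
                        }
                }
        }
        return 0;
}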


@@ -241,16 +241,22 @@ mips_pci_controller:
 #endif
 
 	/*
-	 * Setup the Malta max (2GB) memory for PCI DMA in host bridge
-	 * in transparent addressing mode.
+	 * Set up memory mapping in host bridge for PCI DMA masters,
+	 * in transparent addressing mode. For EVA use the Malta
+	 * maximum of 2 GiB memory in the alias space at 0x80000000
+	 * as per PHYS_OFFSET. Otherwise use 256 MiB of memory in
+	 * the regular space, avoiding mapping the PCI MMIO window
+	 * for DMA as it seems to confuse the system controller's
+	 * logic, causing PCI MMIO to stop working.
 	 */
-	mask = PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH;
-	MSC_WRITE(MSC01_PCI_BAR0, mask);
-	MSC_WRITE(MSC01_PCI_HEAD4, mask);
+	mask = PHYS_OFFSET ? PHYS_OFFSET : 0xf0000000;
+	MSC_WRITE(MSC01_PCI_BAR0,
+		  mask | PCI_BASE_ADDRESS_MEM_PREFETCH);
+	MSC_WRITE(MSC01_PCI_HEAD4,
+		  PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH);
 	mask &= MSC01_PCI_BAR0_SIZE_MSK;
 	MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
-	MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
+	MSC_WRITE(MSC01_PCI_P2SCMAPL, PHYS_OFFSET);
 
 	/* Don't handle target retries indefinitely. */
 	if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
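
For reference, the DMA window sizes quoted in the new comment follow from treating the BAR0 value as a size mask in which the set address bits are decoded. Assuming standard PCI BAR mask semantics, the window size is the 32-bit two's complement of the mask: 2 GiB for 0x80000000 (the EVA PHYS_OFFSET case) and 256 MiB for 0xf0000000. A small sketch of that arithmetic:

/* Illustration of the BAR0 size-mask arithmetic, assuming standard
 * PCI-style mask semantics (window size = two's complement of the mask).
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t window_size(uint32_t bar_mask)
{
        return 0x100000000ULL - bar_mask;       /* 2^32 - mask */
}

int main(void)
{
        printf("0x80000000 -> %llu MiB\n",
               (unsigned long long)(window_size(0x80000000u) >> 20));  /* 2048 MiB, EVA */
        printf("0xf0000000 -> %llu MiB\n",
               (unsigned long long)(window_size(0xf0000000u) >> 20));  /* 256 MiB, !EVA */
        return 0;
}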