Skip to content

Commit 6f2328e

Browse files
Automatic merge of 'next-test' into merge-test (2025-11-18 13:41)
2 parents 834cf7f + 5b3a426 commit 6f2328e

File tree

16 files changed

+106
-124
lines changed

16 files changed

+106
-124
lines changed

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7192,6 +7192,9 @@
71927192
them frequently to increase the rate of SLB faults
71937193
on kernel addresses.
71947194

7195+
no_slb_preload [PPC,EARLY]
7196+
Disables slb preloading for userspace.
7197+
71957198
sunrpc.min_resvport=
71967199
sunrpc.max_resvport=
71977200
[NFS,SUNRPC]

arch/powerpc/include/asm/book3s/32/tlbflush.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
void hash__flush_tlb_mm(struct mm_struct *mm);
1212
void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
1313
void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end);
14+
void hash__flush_gather(struct mmu_gather *tlb);
1415

1516
#ifdef CONFIG_SMP
1617
void _tlbie(unsigned long address);
@@ -29,7 +30,9 @@ void _tlbia(void);
2930
static inline void tlb_flush(struct mmu_gather *tlb)
3031
{
3132
/* 603 needs to flush the whole TLB here since it doesn't use a hash table. */
32-
if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
33+
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
34+
hash__flush_gather(tlb);
35+
else
3336
_tlbia();
3437
}
3538

arch/powerpc/include/asm/book3s/64/mmu-hash.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -524,7 +524,6 @@ void slb_save_contents(struct slb_entry *slb_ptr);
524524
void slb_dump_contents(struct slb_entry *slb_ptr);
525525

526526
extern void slb_vmalloc_update(void);
527-
void preload_new_slb_context(unsigned long start, unsigned long sp);
528527

529528
#ifdef CONFIG_PPC_64S_HASH_MMU
530529
void slb_set_size(u16 size);

arch/powerpc/kernel/process.c

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1897,18 +1897,13 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
18971897
return 0;
18981898
}
18991899

1900-
void preload_new_slb_context(unsigned long start, unsigned long sp);
1901-
19021900
/*
19031901
* Set up a thread for executing a new program
19041902
*/
19051903
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
19061904
{
19071905
#ifdef CONFIG_PPC64
19081906
unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1909-
1910-
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
1911-
preload_new_slb_context(start, sp);
19121907
#endif
19131908

19141909
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

arch/powerpc/mm/book3s32/tlb.c

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -105,3 +105,12 @@ void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
105105
flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
106106
}
107107
EXPORT_SYMBOL(hash__flush_tlb_page);
108+
109+
void hash__flush_gather(struct mmu_gather *tlb)
110+
{
111+
if (tlb->fullmm || tlb->need_flush_all)
112+
hash__flush_tlb_mm(tlb->mm);
113+
else
114+
hash__flush_range(tlb->mm, tlb->start, tlb->end);
115+
}
116+
EXPORT_SYMBOL(hash__flush_gather);

arch/powerpc/mm/book3s64/hash_utils.c

Lines changed: 31 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@
4747
#include <asm/mmu.h>
4848
#include <asm/mmu_context.h>
4949
#include <asm/page.h>
50+
#include <asm/pgalloc.h>
5051
#include <asm/types.h>
5152
#include <linux/uaccess.h>
5253
#include <asm/machdep.h>
@@ -449,6 +450,7 @@ static __init void hash_kfence_map_pool(void)
449450
{
450451
unsigned long kfence_pool_start, kfence_pool_end;
451452
unsigned long prot = pgprot_val(PAGE_KERNEL);
453+
unsigned int pshift = mmu_psize_defs[mmu_linear_psize].shift;
452454

453455
if (!kfence_pool)
454456
return;
@@ -459,6 +461,7 @@ static __init void hash_kfence_map_pool(void)
459461
BUG_ON(htab_bolt_mapping(kfence_pool_start, kfence_pool_end,
460462
kfence_pool, prot, mmu_linear_psize,
461463
mmu_kernel_ssize));
464+
update_page_count(mmu_linear_psize, KFENCE_POOL_SIZE >> pshift);
462465
memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
463466
}
464467

@@ -952,7 +955,7 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
952955
block_size = be64_to_cpu(addr_prop[1]);
953956
if (block_size != (16 * GB))
954957
return 0;
955-
printk(KERN_INFO "Huge page(16GB) memory: "
958+
pr_info("Huge page(16GB) memory: "
956959
"addr = 0x%lX size = 0x%lX pages = %d\n",
957960
phys_addr, block_size, expected_pages);
958961
if (phys_addr + block_size * expected_pages <= memblock_end_of_DRAM()) {
@@ -1135,7 +1138,7 @@ static void __init htab_init_page_sizes(void)
11351138
mmu_vmemmap_psize = mmu_virtual_psize;
11361139
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
11371140

1138-
printk(KERN_DEBUG "Page orders: linear mapping = %d, "
1141+
pr_info("Page orders: linear mapping = %d, "
11391142
"virtual = %d, io = %d"
11401143
#ifdef CONFIG_SPARSEMEM_VMEMMAP
11411144
", vmemmap = %d"
@@ -1234,6 +1237,7 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
12341237
int nid, pgprot_t prot)
12351238
{
12361239
int rc;
1240+
unsigned int pshift = mmu_psize_defs[mmu_linear_psize].shift;
12371241

12381242
if (end >= H_VMALLOC_START) {
12391243
pr_warn("Outside the supported range\n");
@@ -1251,17 +1255,22 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
12511255
mmu_kernel_ssize);
12521256
BUG_ON(rc2 && (rc2 != -ENOENT));
12531257
}
1258+
update_page_count(mmu_linear_psize, (end - start) >> pshift);
12541259
return rc;
12551260
}
12561261

12571262
int hash__remove_section_mapping(unsigned long start, unsigned long end)
12581263
{
1264+
unsigned int pshift = mmu_psize_defs[mmu_linear_psize].shift;
1265+
12591266
int rc = htab_remove_mapping(start, end, mmu_linear_psize,
12601267
mmu_kernel_ssize);
12611268

12621269
if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
12631270
pr_warn("Hash collision while resizing HPT\n");
12641271

1272+
if (!rc)
1273+
update_page_count(mmu_linear_psize, -((end - start) >> pshift));
12651274
return rc;
12661275
}
12671276
#endif /* CONFIG_MEMORY_HOTPLUG */
@@ -1302,27 +1311,34 @@ static void __init htab_initialize(void)
13021311
unsigned long table;
13031312
unsigned long pteg_count;
13041313
unsigned long prot;
1305-
phys_addr_t base = 0, size = 0, end;
1314+
phys_addr_t base = 0, size = 0, end, limit = MEMBLOCK_ALLOC_ANYWHERE;
13061315
u64 i;
1316+
unsigned int pshift = mmu_psize_defs[mmu_linear_psize].shift;
13071317

13081318
DBG(" -> htab_initialize()\n");
13091319

1320+
if (firmware_has_feature(FW_FEATURE_LPAR))
1321+
limit = ppc64_rma_size;
1322+
13101323
if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
13111324
mmu_kernel_ssize = MMU_SEGSIZE_1T;
13121325
mmu_highuser_ssize = MMU_SEGSIZE_1T;
1313-
printk(KERN_INFO "Using 1TB segments\n");
1326+
pr_info("Using 1TB segments\n");
13141327
}
13151328

13161329
if (stress_slb_enabled)
13171330
static_branch_enable(&stress_slb_key);
13181331

1332+
if (no_slb_preload)
1333+
static_branch_enable(&no_slb_preload_key);
1334+
13191335
if (stress_hpt_enabled) {
13201336
unsigned long tmp;
13211337
static_branch_enable(&stress_hpt_key);
13221338
// Too early to use nr_cpu_ids, so use NR_CPUS
13231339
tmp = memblock_phys_alloc_range(sizeof(struct stress_hpt_struct) * NR_CPUS,
13241340
__alignof__(struct stress_hpt_struct),
1325-
0, MEMBLOCK_ALLOC_ANYWHERE);
1341+
MEMBLOCK_LOW_LIMIT, limit);
13261342
memset((void *)tmp, 0xff, sizeof(struct stress_hpt_struct) * NR_CPUS);
13271343
stress_hpt_struct = __va(tmp);
13281344

@@ -1356,11 +1372,10 @@ static void __init htab_initialize(void)
13561372
mmu_hash_ops.hpte_clear_all();
13571373
#endif
13581374
} else {
1359-
unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;
13601375

13611376
table = memblock_phys_alloc_range(htab_size_bytes,
13621377
htab_size_bytes,
1363-
0, limit);
1378+
MEMBLOCK_LOW_LIMIT, limit);
13641379
if (!table)
13651380
panic("ERROR: Failed to allocate %pa bytes below %pa\n",
13661381
&htab_size_bytes, &limit);
@@ -1392,8 +1407,8 @@ static void __init htab_initialize(void)
13921407
size = end - base;
13931408
base = (unsigned long)__va(base);
13941409

1395-
DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
1396-
base, size, prot);
1410+
pr_debug("creating mapping for region: 0x%pa..0x%pa (prot: %lx)\n",
1411+
&base, &size, prot);
13971412

13981413
if ((base + size) >= H_VMALLOC_START) {
13991414
pr_warn("Outside the supported range\n");
@@ -1402,6 +1417,8 @@ static void __init htab_initialize(void)
14021417

14031418
BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
14041419
prot, mmu_linear_psize, mmu_kernel_ssize));
1420+
1421+
update_page_count(mmu_linear_psize, size >> pshift);
14051422
}
14061423
hash_kfence_map_pool();
14071424
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
@@ -1423,6 +1440,8 @@ static void __init htab_initialize(void)
14231440
BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
14241441
__pa(tce_alloc_start), prot,
14251442
mmu_linear_psize, mmu_kernel_ssize));
1443+
update_page_count(mmu_linear_psize,
1444+
(tce_alloc_end - tce_alloc_start) >> pshift);
14261445
}
14271446

14281447

@@ -1867,7 +1886,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
18671886
* in vmalloc space, so switch vmalloc
18681887
* to 4k pages
18691888
*/
1870-
printk(KERN_ALERT "Reducing vmalloc segment "
1889+
pr_alert("Reducing vmalloc segment "
18711890
"to 4kB pages because of "
18721891
"non-cacheable mapping\n");
18731892
psize = mmu_vmalloc_psize = MMU_PAGE_4K;
@@ -2432,6 +2451,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n")
24322451

24332452
static int __init hash64_debugfs(void)
24342453
{
2454+
if (radix_enabled())
2455+
return 0;
24352456
debugfs_create_file("hpt_order", 0600, arch_debugfs_dir, NULL,
24362457
&fops_hpt_order);
24372458
return 0;

arch/powerpc/mm/book3s64/internal.h

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,9 +22,14 @@ static inline bool stress_hpt(void)
2222
return static_branch_unlikely(&stress_hpt_key);
2323
}
2424

25-
void hpt_do_stress(unsigned long ea, unsigned long hpte_group);
25+
extern bool no_slb_preload;
26+
DECLARE_STATIC_KEY_FALSE(no_slb_preload_key);
27+
static inline bool slb_preload_disabled(void)
28+
{
29+
return static_branch_unlikely(&no_slb_preload_key);
30+
}
2631

27-
void slb_setup_new_exec(void);
32+
void hpt_do_stress(unsigned long ea, unsigned long hpte_group);
2833

2934
void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush);
3035

arch/powerpc/mm/book3s64/mmu_context.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -150,8 +150,6 @@ static int hash__init_new_context(struct mm_struct *mm)
150150
void hash__setup_new_exec(void)
151151
{
152152
slice_setup_new_exec();
153-
154-
slb_setup_new_exec();
155153
}
156154
#else
157155
static inline int hash__init_new_context(struct mm_struct *mm)

arch/powerpc/mm/book3s64/pgtable.c

Lines changed: 12 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -510,20 +510,21 @@ atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
510510

511511
void arch_report_meminfo(struct seq_file *m)
512512
{
513-
/*
514-
* Hash maps the memory with one size mmu_linear_psize.
515-
* So don't bother to print these on hash
516-
*/
517-
if (!radix_enabled())
518-
return;
519513
seq_printf(m, "DirectMap4k: %8lu kB\n",
520514
atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
521-
seq_printf(m, "DirectMap64k: %8lu kB\n",
515+
seq_printf(m, "DirectMap64k: %8lu kB\n",
522516
atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
523-
seq_printf(m, "DirectMap2M: %8lu kB\n",
524-
atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
525-
seq_printf(m, "DirectMap1G: %8lu kB\n",
526-
atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
517+
if (radix_enabled()) {
518+
seq_printf(m, "DirectMap2M: %8lu kB\n",
519+
atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
520+
seq_printf(m, "DirectMap1G: %8lu kB\n",
521+
atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
522+
} else {
523+
seq_printf(m, "DirectMap16M: %8lu kB\n",
524+
atomic_long_read(&direct_pages_count[MMU_PAGE_16M]) << 14);
525+
seq_printf(m, "DirectMap16G: %8lu kB\n",
526+
atomic_long_read(&direct_pages_count[MMU_PAGE_16G]) << 24);
527+
}
527528
}
528529
#endif /* CONFIG_PROC_FS */
529530

0 commit comments

Comments (0)