Merge branch 'stable/for-linus-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb

Pull swiotlb update from Konrad Rzeszutek Wilk:
 "A generic (but for right now engaged only with AMD SEV) mechanism to
  adjust a larger size SWIOTLB based on the total memory of the SEV
  guests which right now require the bounce buffer for interacting with
  the outside world.

  Normal knobs (swiotlb=XYZ) still work"
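For context on how the existing knob interacts with the new auto-sizing (an illustrative note, not part of the pull request): swiotlb= on the kernel command line specifies a slab count, and, as the swiotlb.c hunk below shows, an explicitly requested size takes precedence over the SEV adjustment.

    # Hypothetical command line: ask for 65536 slabs of 2 KB each
    # (1 << IO_TLB_SHIFT), i.e. a 128 MB bounce buffer. Because this sets
    # io_tlb_nslabs early, swiotlb_adjust_size() leaves it untouched.
    swiotlb=65536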

* 'stable/for-linus-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  x86,swiotlb: Adjust SWIOTLB bounce buffer size for SEV guests
Linus Torvalds 2020-12-16 13:51:34 -08:00
Parent 009bd55dfc e998879d4f
Commit 007c74e16c
5 changed files with 65 additions and 2 deletions

View file

@@ -37,6 +37,7 @@ void __init sme_map_bootdata(char *real_mode_data);
void __init sme_unmap_bootdata(char *real_mode_data);
void __init sme_early_init(void);
void __init sev_setup_arch(void);
void __init sme_encrypt_kernel(struct boot_params *bp);
void __init sme_enable(struct boot_params *bp);
@@ -69,6 +70,7 @@ static inline void __init sme_map_bootdata(char *real_mode_data) { }
static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
static inline void __init sme_early_init(void) { }
static inline void __init sev_setup_arch(void) { }
static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }

View file

@@ -1049,6 +1049,12 @@ void __init setup_arch(char **cmdline_p)
memblock_set_current_limit(ISA_END_ADDRESS);
e820__memblock_setup();
/*
* Needs to run after memblock setup because it needs the physical
* memory size.
*/
sev_setup_arch();
reserve_bios_regions();
efi_fake_memmap();

View file

@@ -198,6 +198,37 @@ void __init sme_early_init(void)
swiotlb_force = SWIOTLB_FORCE;
}
void __init sev_setup_arch(void)
{
phys_addr_t total_mem = memblock_phys_mem_size();
unsigned long size;
if (!sev_active())
return;
/*
* For SEV, all DMA has to occur via shared/unencrypted pages.
* SEV uses SWIOTLB to make this happen without changing device
* drivers. However, depending on the workload being run, the
* default 64MB of SWIOTLB may not be enough and SWIOTLB may
* run out of buffers for DMA, resulting in I/O errors and/or
* performance degradation especially with high I/O workloads.
*
* Adjust the default size of SWIOTLB for SEV guests using
* a percentage of guest memory for SWIOTLB buffers.
* Also, as the SWIOTLB bounce buffer memory is allocated
* from low memory, ensure that the adjusted size is within
* the limits of low available memory.
*
* The percentage of guest memory used here for SWIOTLB buffers
* is an approximation of the static adjustment, which would use
* 64MB for <1G and ~128M to 256M for 1G-to-4G, i.e., roughly 6%.
*/
size = total_mem * 6 / 100;
size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
swiotlb_adjust_size(size);
}
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
pgprot_t old_prot, new_prot;

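To make the 6% heuristic above concrete, here is some illustrative arithmetic (hypothetical guest sizes, not part of the patch):

/*
 * Example sizing produced by sev_setup_arch():
 *    1 GB guest:  1024 MB * 6 / 100 ≈   61 MB -> clamped up to 64 MB (IO_TLB_DEFAULT_SIZE)
 *    4 GB guest:  4096 MB * 6 / 100 ≈  245 MB -> within [64 MB, 1 GB], used as-is
 *   32 GB guest: 32768 MB * 6 / 100 ≈ 1966 MB -> clamped down to 1 GB (SZ_1G)
 */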
View file

@@ -30,6 +30,9 @@ enum swiotlb_force {
*/
#define IO_TLB_SHIFT 11
/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
extern void swiotlb_init(int verbose);
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swiotlb_nr_tbl(void);
@@ -78,6 +81,7 @@ void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(void);
void __init swiotlb_adjust_size(unsigned long new_size);
#else
#define swiotlb_force SWIOTLB_NO_FORCE
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
@@ -100,6 +104,10 @@ static inline bool is_swiotlb_active(void)
{
return false;
}
static inline void swiotlb_adjust_size(unsigned long new_size)
{
}
#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);

View file

@@ -152,8 +152,6 @@ void swiotlb_set_max_segment(unsigned int val)
max_segment = rounddown(val, PAGE_SIZE);
}
/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
unsigned long size;
@@ -163,6 +161,24 @@ unsigned long swiotlb_size_or_default(void)
return size ? size : (IO_TLB_DEFAULT_SIZE);
}
void __init swiotlb_adjust_size(unsigned long new_size)
{
unsigned long size;
/*
* If swiotlb parameter has not been specified, give a chance to
* architectures such as those supporting memory encryption to
* adjust/expand SWIOTLB size for their use.
*/
if (!io_tlb_nslabs) {
size = ALIGN(new_size, 1 << IO_TLB_SHIFT);
io_tlb_nslabs = size >> IO_TLB_SHIFT;
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}
}
void swiotlb_print_info(void)
{
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
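For reference, a rough trace of swiotlb_adjust_size() with one example input (the value is hypothetical, and this assumes IO_TLB_SEGSIZE is 128 slabs, as defined elsewhere in the swiotlb header):

/*
 * new_size = 245 MB, with swiotlb= not given on the command line:
 *   size          = ALIGN(245 MB, 1 << IO_TLB_SHIFT)     -> already 2 KB aligned
 *   io_tlb_nslabs = size >> IO_TLB_SHIFT                 -> 125440 slabs
 *   io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE) -> already a multiple of 128
 *   pr_info(...)  -> "SWIOTLB bounce buffer size adjusted to 245MB"
 */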