// SPDX-License-Identifier: GPL-2.0
/*
 *
 * A test for the patch "Allow compaction of unevictable pages".
 * With this patch we should be able to allocate at least 1/4
 * of RAM in huge pages. Without the patch much less is
 * allocated.
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>

#include "../kselftest.h"

#define MAP_SIZE_MB	100
#define MAP_SIZE	(MAP_SIZE_MB * 1024 * 1024)

struct map_list {
	void *map;
	struct map_list *next;
};

int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
{
	char buffer[256] = {0};
	char *cmd = "cat /proc/meminfo | grep -i memfree | grep -o '[0-9]*'";
	FILE *cmdfile = popen(cmd, "r");

	if (!cmdfile) {
		ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
		return -1;
	}

	if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
		ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
		pclose(cmdfile);
		return -1;
	}

	pclose(cmdfile);

	*memfree = atoll(buffer);
	cmd = "cat /proc/meminfo | grep -i hugepagesize | grep -o '[0-9]*'";
	cmdfile = popen(cmd, "r");

	if (!cmdfile) {
		ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
		return -1;
	}

	if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
		ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
		pclose(cmdfile);
		return -1;
	}

	pclose(cmdfile);
	*hugepagesize = atoll(buffer);

	return 0;
}

int prereq(void)
{
	char allowed;
	int fd;

	fd = open("/proc/sys/vm/compact_unevictable_allowed",
		  O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		ksft_print_msg("Failed to open /proc/sys/vm/compact_unevictable_allowed: %s\n",
			       strerror(errno));
		return -1;
	}

	if (read(fd, &allowed, sizeof(char)) != sizeof(char)) {
		ksft_print_msg("Failed to read from /proc/sys/vm/compact_unevictable_allowed: %s\n",
			       strerror(errno));
		close(fd);
		return -1;
	}

	close(fd);
	if (allowed == '1')
		return 0;

	ksft_print_msg("Compaction isn't allowed\n");
	return -1;
}

int check_compaction(unsigned long mem_free, unsigned long hugepage_size,
		     unsigned long initial_nr_hugepages)
{
	unsigned long nr_hugepages_ul;
	int fd, ret = -1;
	int compaction_index = 0;
	char nr_hugepages[20] = {0};
	char init_nr_hugepages[24] = {0};

	snprintf(init_nr_hugepages, sizeof(init_nr_hugepages),
		 "%lu", initial_nr_hugepages);

	/*
	 * Test with only 80% of the available memory; otherwise the OOM
	 * killer comes into play.
	 */
	mem_free = mem_free * 0.8;

	fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		ksft_print_msg("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
			       strerror(errno));
		ret = -1;
		goto out;
	}

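	/*
	 * The steps below raise nr_hugepages, read back how many huge pages
	 * the kernel actually managed to allocate, and then restore the
	 * initial value.
	 */
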
	/*
	 * Request a large number of huge pages. The kernel will allocate
	 * as much as it can.
	 */
	if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
		ksft_print_msg("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n",
			       strerror(errno));
		goto close_fd;
	}

	lseek(fd, 0, SEEK_SET);

	if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
		ksft_print_msg("Failed to re-read from /proc/sys/vm/nr_hugepages: %s\n",
			       strerror(errno));
		goto close_fd;
	}

	/*
	 * We should have been able to request at least 1/3 of the memory in
	 * huge pages.
	 */
	nr_hugepages_ul = strtoul(nr_hugepages, NULL, 10);
	if (!nr_hugepages_ul) {
		ksft_print_msg("ERROR: No memory is available as huge pages\n");
		goto close_fd;
	}
	compaction_index = mem_free/(nr_hugepages_ul * hugepage_size);

	lseek(fd, 0, SEEK_SET);

	if (write(fd, init_nr_hugepages, strlen(init_nr_hugepages))
	    != strlen(init_nr_hugepages)) {
		ksft_print_msg("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
			       strerror(errno));
		goto close_fd;
	}

	ksft_print_msg("Number of huge pages allocated = %lu\n",
		       nr_hugepages_ul);

	if (compaction_index > 3) {
		ksft_print_msg("ERROR: Less than 1/%d of memory is available as huge pages\n",
			       compaction_index);
		goto close_fd;
	}

	ret = 0;

close_fd:
	close(fd);
out:
	ksft_test_result(ret == 0, "check_compaction\n");
	return ret;
}

int set_zero_hugepages(unsigned long *initial_nr_hugepages)
{
	int fd, ret = -1;
	char nr_hugepages[20] = {0};

	fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		ksft_print_msg("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
			       strerror(errno));
		goto out;
	}
	if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
		ksft_print_msg("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
			       strerror(errno));
		goto close_fd;
	}

	lseek(fd, 0, SEEK_SET);

	/* Start with the initial condition of 0 huge pages */
	if (write(fd, "0", sizeof(char)) != sizeof(char)) {
		ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
			       strerror(errno));
		goto close_fd;
	}

	*initial_nr_hugepages = strtoul(nr_hugepages, NULL, 10);
	ret = 0;

close_fd:
	close(fd);

out:
	return ret;
}

int main(int argc, char **argv)
{
	struct rlimit lim;
	struct map_list *list = NULL, *entry;
	size_t page_size, i;
	void *map = NULL;
	unsigned long mem_free = 0;
	unsigned long hugepage_size = 0;
	long mem_fragmentable_MB = 0;
	unsigned long initial_nr_hugepages;

	ksft_print_header();

	if (prereq() || geteuid())
		ksft_exit_skip("Prerequisites unsatisfied\n");

	ksft_set_plan(1);

	/* Start the test with zero huge pages so they don't reduce mem_free */
	if (set_zero_hugepages(&initial_nr_hugepages))
		ksft_exit_fail();

	lim.rlim_cur = RLIM_INFINITY;
	lim.rlim_max = RLIM_INFINITY;
	if (setrlimit(RLIMIT_MEMLOCK, &lim))
		ksft_exit_fail_msg("Failed to set rlimit: %s\n", strerror(errno));

	page_size = getpagesize();

	if (read_memory_info(&mem_free, &hugepage_size) != 0)
		ksft_exit_fail_msg("Failed to get meminfo\n");

	mem_fragmentable_MB = mem_free * 0.8 / 1024;

	while (mem_fragmentable_MB > 0) {
		map = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
			   MAP_ANONYMOUS | MAP_PRIVATE | MAP_LOCKED, -1, 0);
		if (map == MAP_FAILED)
			break;

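		/* Track each mapping in a list so it can be unmapped later */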
		entry = malloc(sizeof(struct map_list));
		if (!entry) {
			munmap(map, MAP_SIZE);
			break;
		}
		entry->map = map;
		entry->next = list;
		list = entry;

		/*
		 * Write something (in this case the address of the map) to
		 * ensure that KSM can't merge the mapped pages.
		 */
		for (i = 0; i < MAP_SIZE; i += page_size)
			*(unsigned long *)(map + i) = (unsigned long)map + i;

		mem_fragmentable_MB -= MAP_SIZE_MB;
	}

	/*
	 * Unmap alternate entries in the list (the loop advances twice per
	 * iteration); the mappings that are skipped stay mlocked while
	 * check_compaction() requests huge pages.
	 */
	for (entry = list; entry != NULL; entry = entry->next) {
		munmap(entry->map, MAP_SIZE);
		if (!entry->next)
			break;
		entry = entry->next;
	}

	if (check_compaction(mem_free, hugepage_size,
			     initial_nr_hugepages) == 0)
		ksft_exit_pass();

	ksft_exit_fail();
}