tree:   https://git.kernel.org/pub/scm/linux/kernel/git/josef/btrfs-next.git slab-priority
head:   bd319083ec02fd19b9f3522935d3c6c0528e1864
commit: bd319083ec02fd19b9f3522935d3c6c0528e1864 [1/1] mm: use sc->priority for slab shrink targets
config: i386-randconfig-n0-201746 (attached as .config)
compiler: gcc-6 (Debian 6.4.0-9) 6.4.0 20171026
reproduce:
        git checkout bd319083ec02fd19b9f3522935d3c6c0528e1864
        # save the attached .config to linux build tree
        make ARCH=i386
All errors (new ones prefixed by >>):

   mm/vmscan.o: In function `do_shrink_slab':
>> mm/vmscan.c:336: undefined reference to `__udivdi3'
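The failing expression is the 64-bit division flagged at line 336 below: delta is declared unsigned long long, and on i386 gcc lowers a 64-bit "/" into a call to libgcc's __udivdi3, which the kernel is not linked against. The kernel's usual answer is the do_div() helper from <asm/div64.h>, which divides a u64 lvalue by a 32-bit divisor in place. A minimal sketch of that shape of fix (an assumption about what the eventual patch would look like, not a quote of it):

	#include <asm/div64.h>	/* do_div(): u64 / u32 without libgcc */

	delta = freeable >> priority;
	delta *= 4;
	/*
	 * do_div(n, base) divides the u64 lvalue n in place and returns
	 * the remainder. shrinker->seeks fits in 32 bits, so this avoids
	 * the __udivdi3 call that a plain "/" emits on 32-bit targets.
	 */
	do_div(delta, shrinker->seeks);

The arithmetic is identical; it just sidesteps the libgcc dependency.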
vim +336 mm/vmscan.c

   308	
   309	static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
   310					    struct shrinker *shrinker, int priority)
   311	{
   312		unsigned long freed = 0;
   313		unsigned long long delta;
   314		long total_scan;
   315		long freeable;
   316		long nr;
   317		long new_nr;
   318		int nid = shrinkctl->nid;
   319		long batch_size = shrinker->batch ? shrinker->batch
   320						  : SHRINK_BATCH;
   321		long scanned = 0, next_deferred;
   322	
   323		freeable = shrinker->count_objects(shrinker, shrinkctl);
   324		if (freeable == 0)
   325			return 0;
   326	
   327		/*
   328		 * copy the current shrinker scan count into a local variable
   329		 * and zero it so that other concurrent shrinker invocations
   330		 * don't also do this scanning work.
   331		 */
   332		nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
   333	
   334		total_scan = nr;
   335		delta = freeable >> priority;
 > 336		delta = (4 * delta) / shrinker->seeks;
   337		total_scan += delta;
   338		if (total_scan < 0) {
   339			pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
   340			       shrinker->scan_objects, total_scan);
   341			total_scan = freeable;
   342			next_deferred = nr;
   343		} else
   344			next_deferred = total_scan;
   345	
   346		/*
   347		 * We need to avoid excessive windup on filesystem shrinkers
   348		 * due to large numbers of GFP_NOFS allocations causing the
   349		 * shrinkers to return -1 all the time. This results in a large
   350		 * nr being built up so when a shrink that can do some work
   351		 * comes along it empties the entire cache due to nr >>>
   352		 * freeable. This is bad for sustaining a working set in
   353		 * memory.
   354		 *
   355		 * Hence only allow the shrinker to scan the entire cache when
   356		 * a large delta change is calculated directly.
   357		 */
   358		if (delta < freeable / 4)
   359			total_scan = min(total_scan, freeable / 2);
   360	
   361		/*
   362		 * Avoid risking looping forever due to too large nr value:
   363		 * never try to free more than twice the estimate number of
   364		 * freeable entries.
   365		 */
   366		if (total_scan > freeable * 2)
   367			total_scan = freeable * 2;
   368	
   369		trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
   370					   freeable, delta, total_scan, priority);
   371	
   372		/*
   373		 * Normally, we should not scan less than batch_size objects in one
   374		 * pass to avoid too frequent shrinker calls, but if the slab has less
   375		 * than batch_size objects in total and we are really tight on memory,
   376		 * we will try to reclaim all available objects, otherwise we can end
   377		 * up failing allocations although there are plenty of reclaimable
   378		 * objects spread over several slabs with usage less than the
   379		 * batch_size.
   380		 *
   381		 * We detect the "tight on memory" situations by looking at the total
   382		 * number of objects we want to scan (total_scan). If it is greater
   383		 * than the total number of objects on slab (freeable), we must be
   384		 * scanning at high prio and therefore should try to reclaim as much as
   385		 * possible.
   386		 */
   387		while (total_scan >= batch_size ||
   388		       total_scan >= freeable) {
   389			unsigned long ret;
   390			unsigned long nr_to_scan = min(batch_size, total_scan);
   391	
   392			shrinkctl->nr_to_scan = nr_to_scan;
   393			shrinkctl->nr_scanned = nr_to_scan;
   394			ret = shrinker->scan_objects(shrinker, shrinkctl);
   395			if (ret == SHRINK_STOP)
   396				break;
   397			freed += ret;
   398	
   399			count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
   400			total_scan -= shrinkctl->nr_scanned;
   401			scanned += shrinkctl->nr_scanned;
   402	
   403			cond_resched();
   404		}
   405	
   406		if (next_deferred >= scanned)
   407			next_deferred -= scanned;
   408		else
   409			next_deferred = 0;
   410		/*
   411		 * move the unused scan count back into the shrinker in a
   412		 * manner that handles concurrent updates. If we exhausted the
   413		 * scan, there is no need to do an update.
   414		 */
   415		if (next_deferred > 0)
   416			new_nr = atomic_long_add_return(next_deferred,
   417						&shrinker->nr_deferred[nid]);
   418		else
   419			new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
   420	
   421		trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
   422		return freed;
   423	}
   424	
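To see what the priority-based targeting in this commit actually does, here is a small userspace sketch of the math at lines 335-336 together with the 2x cap at lines 366-367 (the nr_deferred bookkeeping is ignored). DEF_PRIORITY (12) and DEFAULT_SEEKS (2) are the kernel's defaults; the 10000-object cache is just an illustrative number:

	#include <stdio.h>

	#define DEF_PRIORITY	12	/* reclaim starts here and walks toward 0 */
	#define DEFAULT_SEEKS	2	/* default value of shrinker->seeks */

	/* Mirrors the scan-target math in do_shrink_slab() above. */
	static long scan_target(long freeable, int priority, int seeks)
	{
		unsigned long long delta = (unsigned long long)freeable >> priority;

		delta = (4 * delta) / seeks;	/* plain "/" is fine in userspace */
		if (delta > (unsigned long long)(freeable * 2))
			delta = freeable * 2;	/* never scan more than twice freeable */
		return delta;
	}

	int main(void)
	{
		int prio;

		for (prio = DEF_PRIORITY; prio >= 0; prio -= 4)
			printf("priority %2d -> scan %5ld of 10000 freeable objects\n",
			       prio, scan_target(10000, prio, DEFAULT_SEEKS));
		return 0;
	}

This prints scan targets of 4, 78, 1250, and 20000 as priority falls from 12 to 0: the slab scan target now ramps up with reclaim pressure, which matches the commit subject "mm: use sc->priority for slab shrink targets".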
---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz (application/gzip)