Reduce VRAM budget on one perf bot

Cap the budgeted GPU resource cache at 16 MiB (16777216 bytes) on the
NUC7i5BNK (Intel Iris 640) skpbench bot. This lets us see the
reordered_dags_over_budget stat in perf, so we know the fallback code
path is actually being exercised.

Bug: skia:10877
Change-Id: I073c8622ddb8f3449511193bac045bfebc4e277c
Cq-Include-Trybots: luci.skia.skia.primary:Perf-Debian10-Clang-NUC7i5BNK-GPU-IntelIris640-x86_64-Release-All
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/398217
Reviewed-by: Ravi Mistry <rmistry@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
Auto-Submit: Adlai Holler <adlai@google.com>
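
For reference, a minimal sketch of how the new knob flows from task
generation to the skpbench command line. The helper below is
hypothetical and only mirrors the diff; it is not code from this CL.
The value 16777216 is 16 * 1024 * 1024 bytes, i.e. 16 MiB.

    # Illustrative Python sketch, not part of this CL.
    def skpbench_cache_args(properties):
      # gen_tasks sets gpu_resource_cache_limit for the NUC7i5BNK model;
      # the skpbench recipe forwards it as --gpuResourceCacheLimit.
      limit = properties.get('gpu_resource_cache_limit')
      return ['--gpuResourceCacheLimit', limit] if limit else []

    # 16777216 bytes == 16 * 1024 * 1024 == 16 MiB.
    assert skpbench_cache_args(
        {'gpu_resource_cache_limit': '16777216'}) == [
            '--gpuResourceCacheLimit', '16777216']
    assert skpbench_cache_args({}) == []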
diff --git a/infra/bots/README.recipes.md b/infra/bots/README.recipes.md
index 8f760bc..6a508c7 100644
--- a/infra/bots/README.recipes.md
+++ b/infra/bots/README.recipes.md
@@ -463,7 +463,7 @@
 
 [DEPS](/infra/bots/recipes/skpbench.py#12): [recipe\_engine/context][recipe_engine/recipe_modules/context], [recipe\_engine/file][recipe_engine/recipe_modules/file], [recipe\_engine/path][recipe_engine/recipe_modules/path], [recipe\_engine/platform][recipe_engine/recipe_modules/platform], [recipe\_engine/properties][recipe_engine/recipe_modules/properties], [recipe\_engine/python][recipe_engine/recipe_modules/python], [recipe\_engine/raw\_io][recipe_engine/recipe_modules/raw_io], [recipe\_engine/step][recipe_engine/recipe_modules/step], [recipe\_engine/time][recipe_engine/recipe_modules/time], [flavor](#recipe_modules-flavor), [run](#recipe_modules-run), [vars](#recipe_modules-vars)
 
-&mdash; **def [RunSteps](/infra/bots/recipes/skpbench.py#155)(api):**
+&mdash; **def [RunSteps](/infra/bots/recipes/skpbench.py#158)(api):**
 
 &mdash; **def [skpbench\_steps](/infra/bots/recipes/skpbench.py#41)(api):**
 
diff --git a/infra/bots/gen_tasks_logic/skpbench_flags.go b/infra/bots/gen_tasks_logic/skpbench_flags.go
index 15af4e2..43348c2 100644
--- a/infra/bots/gen_tasks_logic/skpbench_flags.go
+++ b/infra/bots/gen_tasks_logic/skpbench_flags.go
@@ -8,4 +8,7 @@
 	if b.model(REDUCE_OPS_TASK_SPLITTING_MODELS...) {
 		b.recipeProp("reduce_ops_task_splitting", "true")
 	}
+	if b.model("NUC7i5BNK") {
+		b.recipeProp("gpu_resource_cache_limit", "16777216")
+	}
 }
diff --git a/infra/bots/recipes/skpbench.expected/trybot.json b/infra/bots/recipes/skpbench.expected/trybot.json
index a55ab44..16d1b21 100644
--- a/infra/bots/recipes/skpbench.expected/trybot.json
+++ b/infra/bots/recipes/skpbench.expected/trybot.json
@@ -300,7 +300,9 @@
       "--adb_binary",
       "adb.1.0.35",
       "/sdcard/revenge_of_the_skiabot/skps",
-      "--reduceOpsTaskSplitting"
+      "--reduceOpsTaskSplitting",
+      "--gpuResourceCacheLimit",
+      "16777216"
     ],
     "env": {
       "CHROME_HEADLESS": "1",
diff --git a/infra/bots/recipes/skpbench.py b/infra/bots/recipes/skpbench.py
index 73e6487..e132bb7 100644
--- a/infra/bots/recipes/skpbench.py
+++ b/infra/bots/recipes/skpbench.py
@@ -113,6 +113,9 @@
   if api.properties.get('reduce_ops_task_splitting') == 'true':
     skpbench_args += ['--reduceOpsTaskSplitting']
 
+  if api.properties.get('gpu_resource_cache_limit'):
+    skpbench_args += ['--gpuResourceCacheLimit', api.properties.get('gpu_resource_cache_limit')]
+
   api.run(api.python, 'skpbench',
       script=skpbench_dir.join('skpbench.py'),
       args=skpbench_args)
@@ -211,7 +214,8 @@
                    revision='abc123',
                    path_config='kitchen',
                    swarm_out_dir='[SWARM_OUT_DIR]',
-                   reduce_ops_task_splitting='true') +
+                   reduce_ops_task_splitting='true',
+                   gpu_resource_cache_limit='16777216') +
     api.path.exists(
         api.path['start_dir'].join('skia'),
         api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
diff --git a/tools/skpbench/skpbench.py b/tools/skpbench/skpbench.py
index 6a12549..9ae3c34 100755
--- a/tools/skpbench/skpbench.py
+++ b/tools/skpbench/skpbench.py
@@ -95,6 +95,9 @@
 __argparse.add_argument('srcs',
   nargs='+',
   help=".skp files or directories to expand for .skp files, and/or .svg files")
+__argparse.add_argument('--gpuResourceCacheLimit',
+  type=int, default=-1,
+  help="Maximum number of bytes to use for budgeted GPU resources.")
 
 FLAGS = __argparse.parse_args()
 if FLAGS.adb:
@@ -167,6 +170,9 @@
   if FLAGS.reduceOpsTaskSplitting:
     ARGV.extend(['--reduceOpsTaskSplitting'])
 
+  if FLAGS.gpuResourceCacheLimit != -1:
+    ARGV.extend(['--gpuResourceCacheLimit', str(FLAGS.gpuResourceCacheLimit)])
+
   if FLAGS.adb:
     if FLAGS.device_serial is None:
       ARGV[:0] = [FLAGS.adb_binary, 'shell']