Tests: performance testing framework

These are scripts for benchmarking Blender features on real-world .blend
files. They were originally written for benchmarking Cycles performance, and
have been made generic so they can be used for other Blender features as well.

The benchmarks can be run locally by developers, but the plan is to also run
them as part of continuous integration to track performance over time.

Currently there are tests for Cycles rendering, .blend file loading and
animation playback.

Documentation:
https://wiki.blender.org/wiki/Tools/Tests/Performance

Main features:
* User-created configurations to quickly run, re-run and analyze a selected
  subset of tests.
* Supports both benchmarking with existing builds and automatic building of
  specified git commits, tags and branches.
* Generates an HTML page with bar and line graphs from test results.
* Controlled through a simple command line tool.
* For writing tests, a convenient abstraction to run a Python function in
  Blender with arguments and a return value (see the sketch after this list).
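
As an illustration of that abstraction, a test module dropped into the tests
folder could look like the sketch below. It is only a sketch: ExampleTest and
the 'example' asset directory are made up, but api.Test, find_blend_files and
run_in_blender are the interfaces added in this commit. The _run function
executes inside Blender, and its argument and return values are pickled
across the process boundary.

  # Hypothetical test module sketch (not part of this commit).
  import api

  def _run(args):
      # Runs inside a Blender instance, so bpy is only importable here.
      import bpy
      import time

      start_time = time.time()
      bpy.context.scene.frame_set(args['frame'])
      return {'time': time.time() - start_time}

  class ExampleTest(api.Test):
      def __init__(self, filepath):
          self.filepath = filepath

      def name(self):
          return self.filepath.stem

      def category(self):
          return "example"

      def run(self, env, device_id):
          # Arguments and results are plain picklable Python objects.
          result, _ = env.run_in_blender(_run, {'frame': 1}, [self.filepath])
          return result

  def generate(env):
      return [ExampleTest(filepath) for filepath in env.find_blend_files('example')]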

Ref T74730

Differential Revision: https://developer.blender.org/D11662
Brecht Van Lommel 2020-03-15 00:30:26 +01:00
parent bb971bead9
commit dc3f46d96b
Notes: blender-bot 2023-02-14 05:53:38 +01:00
Referenced by issue #74730, Automated performance testing
12 changed files with 1293 additions and 0 deletions

View File tests/performance/api/__init__.py

@@ -0,0 +1,8 @@
# Apache License, Version 2.0
from .environment import TestEnvironment
from .device import TestDevice, TestMachine
from .config import TestEntry, TestQueue, TestConfig
from .test import Test, TestCollection
from .graph import TestGraph

View File tests/performance/api/config.py

@@ -0,0 +1,255 @@
# Apache License, Version 2.0
import fnmatch
import json
import pathlib
import sys
from dataclasses import dataclass, field
from typing import Dict, List
from .test import TestCollection
def get_build_hash(args: None) -> str:
import bpy
import sys
build_hash = bpy.app.build_hash.decode('utf-8')
return '' if build_hash == 'Unknown' else build_hash
@dataclass
class TestEntry:
"""Test to run, a combination of revision, test and device."""
test: str = ''
category: str = ''
revision: str = ''
git_hash: str = ''
executable: str = ''
date: int = 0
device_type: str = 'CPU'
device_id: str = 'CPU'
device_name: str = 'Unknown CPU'
status: str = 'queued'
output: Dict = field(default_factory=dict)
benchmark_type: str = 'comparison'
def to_json(self) -> Dict:
json_dict = {}
for field in self.__dataclass_fields__:
json_dict[field] = getattr(self, field)
return json_dict
def from_json(self, json_dict):
for field in self.__dataclass_fields__:
setattr(self, field, json_dict[field])
class TestQueue:
"""Queue of tests to be run or inspected. Matches JSON file on disk."""
def __init__(self, filepath: pathlib.Path):
self.filepath = filepath
self.has_multiple_revisions_to_build = False
self.has_multiple_categories = False
self.entries = []
if self.filepath.is_file():
with open(self.filepath, 'r') as f:
json_entries = json.load(f)
for json_entry in json_entries:
entry = TestEntry()
entry.from_json(json_entry)
self.entries.append(entry)
def rows(self, use_revision_columns: bool) -> List:
# Generate rows of entries for printing and running.
entries = sorted(self.entries, key=lambda entry:
(entry.revision,
entry.device_id,
entry.category,
entry.test))
if not use_revision_columns:
# One entry per row.
return [[entry] for entry in entries]
else:
# Multiple revisions per row.
rows = {}
for entry in entries:
key = (entry.device_id, entry.category, entry.test)
if key in rows:
rows[key].append(entry)
else:
rows[key] = [entry]
return [value for _, value in sorted(rows.items())]
def find(self, revision: str, test: str, category: str, device_id: str) -> Dict:
for entry in self.entries:
if entry.revision == revision and \
entry.test == test and \
entry.category == category and \
entry.device_id == device_id:
return entry
return None
def write(self) -> None:
json_entries = [entry.to_json() for entry in self.entries]
with open(self.filepath, 'w') as f:
json.dump(json_entries, f, indent=2)
class TestConfig:
"""Test configuration, containing a subset of revisions, tests and devices."""
def __init__(self, env, name: str):
# Init configuration from config.py file.
self.name = name
self.base_dir = env.base_dir / name
self.logs_dir = self.base_dir / 'logs'
config = self._read_config_module()
self.tests = TestCollection(env,
getattr(config, 'tests', ['*']),
getattr(config, 'categories', ['*']))
self.revisions = getattr(config, 'revisions', {})
self.builds = getattr(config, 'builds', {})
self.queue = TestQueue(self.base_dir / 'results.json')
self.benchmark_type = getattr(config, 'benchmark_type', 'comparison')
self.devices = []
self._update_devices(env, getattr(config, 'devices', ['CPU']))
self._update_queue(env)
def revision_names(self) -> List:
return sorted(list(self.revisions.keys()) + list(self.builds.keys()))
def device_name(self, device_id: str) -> str:
for device in self.devices:
if device.id == device_id:
return device.name
return "Unknown"
@staticmethod
def write_default_config(env, config_dir: pathlib.Path) -> None:
config_dir.mkdir(parents=True, exist_ok=True)
default_config = """devices = ['CPU']\n"""
default_config += """tests = ['*']\n"""
default_config += """categories = ['*']\n"""
default_config += """builds = {\n"""
default_config += """ 'master': '/home/user/blender-git/build/bin/blender',"""
default_config += """ '2.93': '/home/user/blender-2.93/blender',"""
default_config += """}\n"""
default_config += """revisions = {\n"""
default_config += """}\n"""
config_file = config_dir / 'config.py'
with open(config_file, 'w') as f:
f.write(default_config)
def _read_config_module(self) -> None:
# Import config.py as a module.
import importlib.util
spec = importlib.util.spec_from_file_location("testconfig", self.base_dir / 'config.py')
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
def _update_devices(self, env, device_filters: List) -> None:
# Find devices matching the filters.
need_gpus = device_filters != ['CPU']
machine = env.get_machine(need_gpus)
self.devices = []
for device in machine.devices:
for device_filter in device_filters:
if fnmatch.fnmatch(device.id, device_filter):
self.devices.append(device)
break
def _update_queue(self, env) -> None:
# Update queue to match configuration, adding and removing entries
# so that there is one entry for each revision, device and test
# combination.
entries = []
# Get entries for specified commits, tags and branches.
for revision_name, revision_commit in self.revisions.items():
git_hash = env.resolve_git_hash(revision_commit)
date = env.git_hash_date(git_hash)
entries += self._get_entries(revision_name, git_hash, '', date)
# Optimization to avoid rebuilds.
revisions_to_build = set()
for entry in entries:
if entry.status in ('queued', 'outdated'):
revisions_to_build.add(entry.git_hash)
self.queue.has_multiple_revisions_to_build = len(revisions_to_build) > 1
# Get entries for revisions based on existing builds.
for revision_name, executable in self.builds.items():
executable_path = pathlib.Path(executable)
if not executable_path.exists():
sys.stderr.write(f'Error: build {executable} not found\n')
sys.exit(1)
env.set_blender_executable(executable_path)
git_hash, _ = env.run_in_blender(get_build_hash, {})
env.unset_blender_executable()
mtime = executable_path.stat().st_mtime
entries += self._get_entries(revision_name, git_hash, executable, mtime)
# Detect number of categories for more compact printing.
categories = set()
for entry in entries:
categories.add(entry.category)
self.queue.has_multiple_categories = len(categories) > 1
# Replace actual entries.
self.queue.entries = entries
def _get_entries(self,
revision_name: str,
git_hash: str,
executable: pathlib.Path,
date: int) -> List:
entries = []
for test in self.tests.tests:
test_name = test.name()
test_category = test.category()
for device in self.devices:
entry = self.queue.find(revision_name, test_name, test_category, device.id)
if entry:
# Test if revision hash or executable changed.
if entry.git_hash != git_hash or \
entry.executable != executable or \
entry.benchmark_type != self.benchmark_type or \
entry.date != date:
# Update existing entry.
entry.git_hash = git_hash
entry.executable = executable
entry.benchmark_type = self.benchmark_type
entry.date = date
if entry.status in ('done', 'failed'):
entry.status = 'outdated'
else:
# Add new entry if it did not exist yet.
entry = TestEntry(
revision=revision_name,
git_hash=git_hash,
executable=executable,
date=date,
test=test_name,
category=test_category,
device_type=device.type,
device_id=device.id,
device_name=device.name,
benchmark_type=self.benchmark_type)
entries.append(entry)
return entries
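
For reference, a configuration written by TestConfig.write_default_config
above looks roughly like the sketch below; the build paths are the
placeholder values from the template, and the revisions and benchmark_type
entries are illustrative:

  # Example config.py sketch (placeholder paths and revision names).
  devices = ['CPU']
  tests = ['*']
  categories = ['*']
  builds = {
      'master': '/home/user/blender-git/build/bin/blender',
      '2.93': '/home/user/blender-2.93/blender',
  }
  revisions = {
      # Names mapped to git commits, tags or branches, for example:
      # 'v2.93.0': 'v2.93.0',
  }
  # benchmark_type = 'comparison'  # or 'time_series' for line graphs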

View File tests/performance/api/device.py

@@ -0,0 +1,68 @@
# Apache License, Version 2.0
import platform
import subprocess
from typing import List
def get_cpu_name() -> str:
# Get full CPU name.
if platform.system() == "Windows":
return platform.processor()
elif platform.system() == "Darwin":
cmd = ['/usr/sbin/sysctl', "-n", "machdep.cpu.brand_string"]
return subprocess.check_output(cmd).strip().decode('utf-8')
else:
with open('/proc/cpuinfo') as f:
for line in f:
if line.startswith('model name'):
return line.split(':')[1].strip()
return "Unknown CPU"
def get_gpu_device(args: None) -> List:
# Get the list of available Cycles GPU devices.
import bpy
import sys
prefs = bpy.context.preferences
cprefs = prefs.addons['cycles'].preferences
result = []
for device_type, _, _, _ in cprefs.get_device_types(bpy.context):
cprefs.compute_device_type = device_type
devices = cprefs.get_devices_for_type(device_type)
index = 0
for device in devices:
if device.type == device_type:
result.append({'type': device.type, 'name': device.name, 'index': index})
index += 1
break
return result
class TestDevice:
def __init__(self, device_type: str, device_id: str, name: str, operating_system: str):
self.type = device_type
self.id = device_id
self.name = name
self.operating_system = operating_system
class TestMachine:
def __init__(self, env, need_gpus: bool):
operating_system = platform.system()
self.devices = [TestDevice('CPU', 'CPU', get_cpu_name(), operating_system)]
self.has_gpus = need_gpus
if need_gpus and env.blender_executable:
gpu_devices, _ = env.run_in_blender(get_gpu_device, {})
for gpu_device in gpu_devices:
device_type = gpu_device['type']
device_name = gpu_device['name']
device_id = gpu_device['type'] + "_" + str(gpu_device['index'])
self.devices.append(TestDevice(device_type, device_id, device_name, operating_system))
def cpu_device(self) -> TestDevice:
return self.devices[0]

View File tests/performance/api/environment.py

@@ -0,0 +1,243 @@
# Apache License, Version 2.0
import base64
import glob
import inspect
import multiprocessing
import os
import pathlib
import platform
import pickle
import subprocess
import sys
from typing import Callable, Dict, List
from .config import TestConfig
from .device import TestMachine
class TestEnvironment:
def __init__(self, blender_git_dir: pathlib.Path, base_dir: pathlib.Path):
self.blender_git_dir = blender_git_dir
self.base_dir = base_dir
self.blender_dir = base_dir / 'blender'
self.build_dir = base_dir / 'build'
self.lib_dir = base_dir / 'lib'
self.benchmarks_dir = self.blender_git_dir.parent / 'lib' / 'benchmarks'
self.git_executable = 'git'
self.cmake_executable = 'cmake'
self.cmake_options = ['-DWITH_INTERNATIONAL=OFF', '-DWITH_BUILDINFO=OFF']
self.unset_blender_executable()
self.log_file = None
self.machine = None
def get_machine(self, need_gpus: bool=True) -> TestMachine:
if not self.machine or (need_gpus and not self.machine.has_gpus):
self.machine = TestMachine(self, need_gpus)
return self.machine
def init(self, build) -> None:
if not self.benchmarks_dir.exists():
sys.stderr.write(f'Error: benchmark files directory not found at {self.benchmarks_dir}\n')
sys.exit(1)
# Create benchmarks folder contents.
print(f'Init {self.base_dir}')
self.base_dir.mkdir(parents=True, exist_ok=True)
if len(self.get_configs(names_only=True)) == 0:
config_dir = self.base_dir / 'default'
print(f'Creating default configuration in {config_dir}')
TestConfig.write_default_config(self, config_dir)
if build:
if not self.lib_dir.exists():
print(f'Creating symlink at {self.lib_dir}')
self.lib_dir.symlink_to(self.blender_git_dir.parent / 'lib')
else:
print(f'Exists {self.lib_dir}')
if not self.blender_dir.exists():
print(f'Init git worktree in {self.blender_dir}')
self.call([self.git_executable, 'worktree', 'add', '--detach', self.blender_dir, 'HEAD'], self.blender_git_dir)
else:
print(f'Exists {self.blender_dir}')
if not self.build_dir.exists():
print(f'Init build in {self.build_dir}')
self.build_dir.mkdir()
# No translation to avoid dealing with submodules
self.call([self.cmake_executable, self.blender_dir, '.'] + self.cmake_options, self.build_dir)
else:
print(f'Exists {self.build_dir}')
print("Building")
self.build()
print('Done')
def checkout(self, git_hash: str) -> None:
# Checkout Blender revision
if not self.blender_dir.exists():
sys.stderr.write('\n\nError: no build set up, run `./benchmark init --build` first\n')
sys.exit(1)
self.call([self.git_executable, 'clean', '-f', '-d'], self.blender_dir)
self.call([self.git_executable, 'reset', '--hard', 'HEAD'], self.blender_dir)
self.call([self.git_executable, 'checkout', '--detach', git_hash], self.blender_dir)
self.build()
def build(self) -> None:
# Build Blender revision
if not self.build_dir.exists():
sys.stderr.write('\n\nError: no build set up, run `./benchmark init --build` first\n')
sys.exit(1)
jobs = str(multiprocessing.cpu_count())
self.call([self.cmake_executable, '.'] + self.cmake_options, self.build_dir)
self.call([self.cmake_executable, '--build', '.', '-j', jobs, '--target', 'install'], self.build_dir)
def set_blender_executable(self, executable_path: pathlib.Path) -> None:
# Run all Blender commands with this executable.
self.blender_executable = executable_path
def unset_blender_executable(self) -> None:
if platform.system() == "Windows":
self.blender_executable = self.build_dir / 'bin' / 'blender.exe'
elif platform.system() == "Darwin":
self.blender_executable = self.build_dir / 'bin' / 'Blender.app' / 'Contents' / 'MacOS' / 'Blender'
else:
self.blender_executable = self.build_dir / 'bin' / 'blender'
if not self.blender_executable.exists():
self.blender_executable = 'blender'
def set_log_file(self, filepath: pathlib.Path, clear=True) -> None:
# Log all commands and output to this file.
self.log_file = filepath
if clear:
self.log_file.unlink(missing_ok=True)
def unset_log_file(self) -> None:
self.log_file = None
def call(self, args: List[str], cwd: pathlib.Path, silent=False) -> List[str]:
# Execute command with arguments in specified directory,
# and return combined stdout and stderr output.
# Open log file for writing
f = None
if self.log_file:
if not self.log_file.exists():
self.log_file.parent.mkdir(parents=True, exist_ok=True)
f = open(self.log_file, 'a')
f.write('\n' + ' '.join([str(arg) for arg in args]) + '\n\n')
proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Read line by line
lines = []
try:
while proc.poll() is None:
line = proc.stdout.readline()
if line:
line_str = line.decode('utf-8', 'ignore')
lines.append(line_str)
if f:
f.write(line_str)
except KeyboardInterrupt:
# Avoid processes that keep running when interrupting.
proc.terminate()
if f:
f.close()
# Print command output on error
if proc.returncode != 0 and not silent:
for line in lines:
print(line.rstrip())
raise Exception("Error executing command")
return lines
def call_blender(self, args: List[str], foreground=False) -> List[str]:
# Execute Blender command with arguments.
common_args = ['--factory-startup', '--enable-autoexec', '--python-exit-code', '1']
if foreground:
common_args += ['--no-window-focus', '--window-geometry', '0', '0', '1024', '768']
else:
common_args += ['--background']
return self.call([self.blender_executable] + common_args + args, cwd=self.base_dir)
def run_in_blender(self,
function: Callable[[Dict], Dict],
args: Dict,
blender_args: List=[],
foreground=False) -> Dict:
# Run function in a Blender instance. Arguments and return values are
# passed as a Python object that must be serializable with pickle.
# Get information to call this function from Blender.
package_path = pathlib.Path(__file__).parent.parent
functionname = function.__name__
modulename = inspect.getmodule(function).__name__
# Serialize arguments in base64, to avoid having to escape it.
args = base64.b64encode(pickle.dumps(args))
output_prefix = 'TEST_OUTPUT: '
expression = (f'import sys, pickle, base64\n'
f'sys.path.append("{package_path}")\n'
f'import {modulename}\n'
f'args = pickle.loads(base64.b64decode({args}))\n'
f'result = {modulename}.{functionname}(args)\n'
f'result = base64.b64encode(pickle.dumps(result))\n'
f'print("{output_prefix}" + result.decode())\n')
expr_args = blender_args + ['--python-expr', expression]
lines = self.call_blender(expr_args, foreground=foreground)
# Parse output.
for line in lines:
if line.startswith(output_prefix):
output = line[len(output_prefix):].strip()
result = pickle.loads(base64.b64decode(output))
return result, lines
return {}, lines
def find_blend_files(self, dirpath: pathlib.Path) -> List:
# Find .blend files in subdirectories of the given directory in the
# lib/benchmarks directory.
dirpath = self.benchmarks_dir / dirpath
filepaths = []
for filename in glob.iglob(str(dirpath / '*.blend'), recursive=True):
filepaths.append(pathlib.Path(filename))
return filepaths
def get_configs(self, name: str=None, names_only: bool=False) -> List:
# Get list of configurations in the benchmarks directory.
configs = []
if self.base_dir.exists():
for dirname in os.listdir(self.base_dir):
if not name or dirname == name:
dirpath = self.base_dir / dirname / 'config.py'
if dirpath.exists():
if names_only:
configs.append(dirname)
else:
configs.append(TestConfig(self, dirname))
return configs
def resolve_git_hash(self, revision):
# Get git hash for a tag or branch.
return self.call([self.git_executable, 'rev-parse', revision], self.blender_git_dir)[0].strip()
def git_hash_date(self, git_hash):
# Get commit date for a git hash.
return int(self.call([self.git_executable, 'log', '-n1', git_hash, '--format=%at'], self.blender_git_dir)[0].strip())
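
As an illustration of the run_in_blender abstraction above: the caller passes
a top-level function plus a picklable arguments object, and receives the
unpickled return value together with Blender's output lines. The sketch below
is hypothetical (get_version is not part of this commit), and the function
has to live in a module importable from the benchmark directory, since
run_in_blender re-imports it by name inside Blender.

  # Hypothetical usage sketch for TestEnvironment.run_in_blender.
  def get_version(args: None) -> str:
      # Runs inside the Blender instance, so bpy is only importable here.
      import bpy
      return bpy.app.version_string

  # In the host process, with an initialized TestEnvironment:
  # version, lines = env.run_in_blender(get_version, {})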

View File tests/performance/api/graph.py

@@ -0,0 +1,105 @@
# Apache License, Version 2.0
from . import TestQueue
import json
import pathlib
from typing import Dict, List
class TestGraph:
def __init__(self, json_filepaths: List[pathlib.Path]):
# Initialize graph from JSON files. Note that this is implemented without
# accessing any benchmark environment or configuration. This way, benchmarks
# run on various machines can be aggregated and the graph generated on another
# machine.
# Gather entries for each device.
devices = {}
for json_filepath in json_filepaths:
queue = TestQueue(json_filepath)
for entry in queue.entries:
if entry.status in ('done', 'outdated'):
device_name = entry.device_name
if device_name in devices.keys():
devices[device_name].append(entry)
else:
devices[device_name] = [entry]
data = []
for device_name, device_entries in devices.items():
# Gather used categories.
categories = {}
for entry in device_entries:
category = entry.category
if category in categories.keys():
categories[category].append(entry)
else:
categories[category] = [entry]
# Generate one graph for every device x category combination.
for category, category_entries in categories.items():
entries = sorted(category_entries, key=lambda entry: (entry.revision, entry.test))
chart_type = 'line' if entries[0].benchmark_type == 'time_series' else 'comparison'
data.append(self.chart(device_name, category, entries, chart_type))
self.json = json.dumps(data, indent=2)
def chart(self, device_name: str, category: str, entries: List, chart_type: str) -> Dict:
# Gather used tests.
tests = {}
for entry in entries:
test = entry.test
if test not in tests.keys():
tests[test] = len(tests)
# Gather used revisions.
revisions = {}
revision_dates = {}
for entry in entries:
revision = entry.revision
if revision not in revisions.keys():
revisions[revision] = len(revisions)
revision_dates[revision] = int(entry.date)
# Google Charts JSON data layout is like a spreadsheet table, with
# columns, rows and cells. We create one column for revision labels,
# and one column for each test.
cols = []
if chart_type == 'line':
cols.append({'id': '', 'label': 'Date', 'type': 'date'})
else:
cols.append({'id': '', 'label': 'Revision', 'type': 'string'})
for test, test_index in tests.items():
cols.append({'id': '', 'label': test, 'type': 'number'})
rows = []
for revision, revision_index in revisions.items():
if chart_type == 'line':
date = revision_dates[revision]
row = [{'f': None, 'v': 'Date({0})'.format(date * 1000)}]
else:
row = [{'f': None, 'v': revision}]
row += [{}] * len(tests)
rows.append({'c': row})
for entry in entries:
test_index = tests[entry.test]
revision_index = revisions[entry.revision]
time = entry.output['time']
rows[revision_index]['c'][test_index + 1] = {'f': None, 'v': time}
data = {'cols': cols, 'rows': rows}
return {'device': device_name, 'category': category, 'data': data, 'chart_type': chart_type}
def write(self, filepath: pathlib.Path) -> None:
# Write HTML page with JSON graph data embedded.
template_dir = pathlib.Path(__file__).parent
with open(template_dir / 'graph.template.html', 'r') as f:
template = f.read()
contents = template.replace('%JSON_DATA%', self.json)
with open(filepath, "w") as f:
f.write(contents)
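
For reference, a single comparison chart entry as produced by TestGraph.chart
above has roughly the shape sketched below, written as a Python literal; the
device, test and timing values are made-up examples:

  # Illustrative chart entry; one column per test, one row per revision.
  example_chart = {
      'device': 'Example CPU',
      'category': 'cycles',
      'chart_type': 'comparison',
      'data': {
          'cols': [
              {'id': '', 'label': 'Revision', 'type': 'string'},
              {'id': '', 'label': 'example_scene', 'type': 'number'},
          ],
          'rows': [
              {'c': [{'f': None, 'v': 'master'}, {'f': None, 'v': 120.52}]},
          ],
      },
  }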

View File tests/performance/api/graph.template.html

@@ -0,0 +1,86 @@
<html>
<head>
<title>Benchmarks</title>
<meta charset="UTF-8">
<style type="text/css">
body { margin: 40px auto;
font-family: Arial;
font-size: 14px;
color: #333;
max-width: 900px; }
a { text-decoration: none; color: #06b; }
</style>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script>
google.charts.load('current', {'packages':['line', 'bar']});
google.charts.setOnLoadCallback(draw_charts);
function transposeDataTable(dt)
{
/* Swap rows and columns. Bar and line charts expect different layouts;
* with this function we can use the same data source for both. */
var ndt = new google.visualization.DataTable;
ndt.addColumn('string',dt.getColumnLabel(0));
for(var x=1; x<dt.getNumberOfColumns(); x++) {
ndt.addRow([dt.getColumnLabel(x)]);
}
for(var x=0; x<dt.getNumberOfRows(); x++) {
ndt.addColumn('number', dt.getValue(x,0));
for(var y=1; y<dt.getNumberOfColumns(); y++) {
ndt.setValue(y-1, x+1, dt.getValue(x,y));
}
}
return ndt;
}
function draw_charts()
{
/* Load JSON data. */
var json_data = %JSON_DATA%;
/* Clear contents. */
charts_elem = document.getElementById("charts");
while(charts_elem.firstChild)
{
charts_elem.removeChild(charts_elem.firstChild);
}
/* Draw charts for each device. */
for (var i = 0; i < json_data.length; i++)
{
device = json_data[i];
/* Chart drawing options. */
var options = {
chart: {title: device["category"], subtitle: device['device']},
pointsVisible: true,
pointSize: 2.5,
height: 500,
};
/* Create chart div. */
elem = document.createElement('div');
elem.id = device["id"];
charts_elem.appendChild(elem)
/* Create chart. */
var data = new google.visualization.DataTable(device["data"]);
if (device['chart_type'] == 'line') {
var chart = new google.charts.Line(elem);
chart.draw(data, options);
}
else {
var chart = new google.charts.Bar(elem);
chart.draw(transposeDataTable(data), options);
}
}
}
</script>
</head>
<body>
<h1>Benchmarks</h1>
<div id="charts">
...
</div>
</body>
</html>

View File tests/performance/api/test.py

@@ -0,0 +1,71 @@
# Apache License, Version 2.0
import abc
import fnmatch
from typing import Dict, List
class Test:
@abc.abstractmethod
def name(self) -> str:
"""
Name of the test.
"""
@abc.abstractmethod
def category(self) -> str:
"""
Category of the test.
"""
def use_device(self) -> bool:
"""
Test uses a specific CPU or GPU device.
"""
return False
@abc.abstractmethod
def run(self, env, device_id: str) -> Dict:
"""
Execute the test and report results.
"""
class TestCollection:
def __init__(self, env, names_filter: List=['*'], categories_filter: List=['*']):
import importlib
import pkgutil
import tests
self.tests = []
# Find and import all Python files in the tests folder, and generate
# the list of tests for each.
for _, modname, _ in pkgutil.iter_modules(tests.__path__, 'tests.'):
module = importlib.import_module(modname)
generated_tests = module.generate(env)
for test in generated_tests:
test_category = test.category()
found = False
for category_filter in categories_filter:
if fnmatch.fnmatch(test_category, category_filter):
found = True
if not found:
continue
test_name = test.name()
found = False
for name_filter in names_filter:
if fnmatch.fnmatch(test_name, name_filter):
found = True
if not found:
continue
self.tests.append(test)
def find(self, test_name: str, test_category: str):
# Find a test based on name and category.
for test in self.tests:
if test.name() == test_name and test.category() == test_category:
return test
return None

View File tests/performance/benchmark (executable file)

@@ -0,0 +1,299 @@
#!/usr/bin/env python3
# Apache License, Version 2.0
import api
import argparse
import fnmatch
import pathlib
import shutil
import sys
import time
from typing import List
def find_blender_git_dir() -> pathlib.Path:
# Find .git directory of the repository we are in.
cwd = pathlib.Path.cwd()
for path in [cwd] + list(cwd.parents):
if (path / '.git').exists():
return path
return None
def get_tests_base_dir(blender_git_dir: pathlib.Path) -> pathlib.Path:
# Benchmarks dir is next to the Blender source folder.
return blender_git_dir.parent / 'benchmark'
def use_revision_columns(config: api.TestConfig) -> bool:
return config.benchmark_type == "comparison" and \
len(config.queue.entries) > 0 and \
not config.queue.has_multiple_revisions_to_build
def print_header(config: api.TestConfig) -> None:
# Print header with revision column headers.
if use_revision_columns(config):
header = ""
if config.queue.has_multiple_categories:
header += f"{'': <15} "
header += f"{'': <40} "
for revision_name in config.revision_names():
header += f"{revision_name: <20} "
print(header)
def print_row(config: api.TestConfig, entries: List, end='\n') -> None:
# Print one or more test entries on a row.
row = ""
# For time series, print revision first.
if not use_revision_columns(config):
revision = entries[0].revision
git_hash = entries[0].git_hash
row += f"{revision: <15} "
if config.queue.has_multiple_categories:
row += f"{entries[0].category: <15} "
row += f"{entries[0].test: <40} "
for entry in entries:
# Show time or status.
status = entry.status
output = entry.output
result = ''
if status in ('done', 'outdated') and output:
result = '%.4fs' % output['time']
if status == 'outdated':
result += " (outdated)"
else:
result = status
row += f"{result: <20} "
print(row, end=end, flush=True)
def match_entry(entry: api.TestEntry, args: argparse.Namespace):
# Filter tests by name and category.
return fnmatch.fnmatch(entry.test, args.test) or \
fnmatch.fnmatch(entry.category, args.test) or \
entry.test.find(args.test) != -1 or \
entry.category.find(args.test) != -1
def run_entry(env: api.TestEnvironment, config: api.TestConfig, row: List, entry: api.TestEntry):
# Check if entry needs to be run.
if entry.status not in ('queued', 'outdated'):
print_row(config, row, end='\r')
return False
# Run test entry.
revision = entry.revision
git_hash = entry.git_hash
testname = entry.test
testcategory = entry.category
device_type = entry.device_type
device_id = entry.device_id
test = config.tests.find(testname, testcategory)
if not test:
return False
# Log all output to dedicated log file.
logname = testcategory + '_' + testname + '_' + revision
if device_id != 'CPU':
logname += '_' + device_id
env.set_log_file(config.logs_dir / (logname + '.log'), clear=True)
# Build revision, or just set path to existing executable.
entry.status = 'building'
print_row(config, row, end='\r')
if len(entry.executable):
env.set_blender_executable(pathlib.Path(entry.executable))
else:
env.checkout(git_hash)
env.build()
# Run test and update output and status.
entry.status = 'running'
print_row(config, row, end='\r')
entry.output = test.run(env, device_id)
entry.status = 'done' if entry.output else 'failed'
print_row(config, row, end='\r')
# Update device name in case the device changed since the entry was created.
entry.device_name = config.device_name(device_id)
# Restore default logging and Blender executable.
env.unset_log_file()
env.unset_blender_executable()
return True
def cmd_init(env: api.TestEnvironment, argv: List):
# Initialize benchmarks folder.
parser = argparse.ArgumentParser()
parser.add_argument('--build', default=False, action='store_true')
args = parser.parse_args(argv)
env.set_log_file(env.base_dir / 'setup.log', clear=False)
env.init(args.build)
env.unset_log_file()
def cmd_list(env: api.TestEnvironment, argv: List) -> None:
# List devices, tests and configurations.
print('DEVICES')
machine = env.get_machine()
for device in machine.devices:
name = f"{device.name} ({device.operating_system})"
print(f"{device.id: <15} {name}")
print('')
print('TESTS')
collection = api.TestCollection(env)
for test in collection.tests:
print(f"{test.category(): <15} {test.name(): <50}")
print('')
print('CONFIGS')
configs = env.get_configs(names_only=True)
for config_name in configs:
print(config_name)
def cmd_status(env: api.TestEnvironment, argv: List):
# Print status of tests in configurations.
parser = argparse.ArgumentParser()
parser.add_argument('config', nargs='?', default=None)
parser.add_argument('test', nargs='?', default='*')
args = parser.parse_args(argv)
configs = env.get_configs(args.config)
first = True
for config in configs:
if not args.config:
if first:
first = False
else:
print("")
print(config.name.upper())
print_header(config)
for row in config.queue.rows(use_revision_columns(config)):
if match_entry(row[0], args):
print_row(config, row)
def cmd_reset(env: api.TestEnvironment, argv: List):
# Reset tests to re-run them.
parser = argparse.ArgumentParser()
parser.add_argument('config', nargs='?', default=None)
parser.add_argument('test', nargs='?', default='*')
args = parser.parse_args(argv)
configs = env.get_configs(args.config)
for config in configs:
print_header(config)
for row in config.queue.rows(use_revision_columns(config)):
if match_entry(row[0], args):
for entry in row:
entry.status = 'queued'
entry.output = {}
print_row(config, row)
config.queue.write()
def cmd_run(env: api.TestEnvironment, argv: List):
# Run tests.
parser = argparse.ArgumentParser()
parser.add_argument('config', nargs='?', default=None)
parser.add_argument('test', nargs='?', default='*')
args = parser.parse_args(argv)
configs = env.get_configs(args.config)
for config in configs:
updated = False
print_header(config)
for row in config.queue.rows(use_revision_columns(config)):
if match_entry(row[0], args):
for entry in row:
if run_entry(env, config, row, entry):
updated = True
# Write queue every time in case running gets interrupted,
# so it can be resumed.
config.queue.write()
print_row(config, row)
if updated:
# Generate graph if tests were run.
json_filepath = config.base_dir / "results.json"
html_filepath = config.base_dir / "results.html"
graph = api.TestGraph([json_filepath])
graph.write(html_filepath)
print("\nfile://" + str(html_filepath))
def cmd_graph(argv: List):
# Create graph from a given JSON results file.
parser = argparse.ArgumentParser()
parser.add_argument('json_file', nargs='+')
parser.add_argument('-o', '--output', type=str, required=True)
args = parser.parse_args(argv)
graph = api.TestGraph([pathlib.Path(path) for path in args.json_file])
graph.write(pathlib.Path(args.output))
def main():
usage = ('benchmark <command> [<args>]\n'
'\n'
'Commands:\n'
' init [--build] Init benchmarks directory and default config\n'
' Optionally with automated revision building setup\n'
' \n'
' list List available tests, devices and configurations\n'
' \n'
' run [<config>] [<test>] Execute tests for configuration\n'
' reset [<config>] [<test>] Clear tests results from config, for re-running\n'
' status [<config>] [<test>] List configurations and their tests\n'
' \n'
' graph a.json b.json... -o out.html Create graph from results in JSON files\n')
parser = argparse.ArgumentParser(
description='Blender performance testing',
usage=usage)
parser.add_argument('command', nargs='?', default='help')
args = parser.parse_args(sys.argv[1:2])
argv = sys.argv[2:]
blender_git_dir = find_blender_git_dir()
if blender_git_dir is None:
sys.stderr.write('Error: no blender git repository found from current working directory\n')
sys.exit(1)
if args.command == 'graph':
cmd_graph(argv)
sys.exit(0)
base_dir = get_tests_base_dir(blender_git_dir)
env = api.TestEnvironment(blender_git_dir, base_dir)
if args.command == 'init':
cmd_init(env, argv)
sys.exit(0)
if not env.base_dir.exists():
sys.stderr.write('Error: benchmark directory not initialized\n')
sys.exit(1)
if args.command == 'list':
cmd_list(env, argv)
elif args.command == 'run':
cmd_run(env, argv)
elif args.command == 'reset':
cmd_reset(env, argv)
elif args.command == 'status':
cmd_status(env, argv)
elif args.command == 'help':
parser.print_usage()
else:
sys.stderr.write(f'Unknown command: {args.command}\n')
if __name__ == '__main__':
main()
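
A typical session with the command line tool above might look like this
(hypothetical example; 'default' is the configuration created by init, and
the last argument filters tests by name or category):

  ./benchmark init --build
  ./benchmark list
  ./benchmark run default cycles
  ./benchmark status default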

View File tests/performance/tests/__init__.py

@@ -0,0 +1,2 @@
# Apache License, Version 2.0

View File

@@ -0,0 +1,38 @@
# Apache License, Version 2.0
import api
import os
def _run(args):
import bpy
import time
start_time = time.time()
scene = bpy.context.scene
for i in range(scene.frame_start, scene.frame_end):
scene.frame_set(i)
elapsed_time = time.time() - start_time
result = {'time': elapsed_time}
return result
class AnimationTest(api.Test):
def __init__(self, filepath):
self.filepath = filepath
def name(self):
return self.filepath.stem
def category(self):
return "animation"
def run(self, env, device_id):
args = {}
result, _ = env.run_in_blender(_run, args, [self.filepath])
return result
def generate(env):
filepaths = env.find_blend_files('animation')
return [AnimationTest(filepath) for filepath in filepaths]

View File

@@ -0,0 +1,39 @@
# Apache License, Version 2.0
import api
import os
import pathlib
def _run(filepath):
import bpy
import time
# Load once to ensure it's cached by OS
bpy.ops.wm.open_mainfile(filepath=filepath)
bpy.ops.wm.read_homefile()
# Measure loading the second time
start_time = time.time()
bpy.ops.wm.open_mainfile(filepath=filepath)
elapsed_time = time.time() - start_time
result = {'time': elapsed_time}
return result
class BlendLoadTest(api.Test):
def __init__(self, filepath):
self.filepath = filepath
def name(self):
return self.filepath.stem
def category(self):
return "blend_load"
def run(self, env, device_id):
result, _ = env.run_in_blender(_run, str(self.filepath))
return result
def generate(env):
filepaths = env.find_blend_files('*/*')
return [BlendLoadTest(filepath) for filepath in filepaths]

View File

@@ -0,0 +1,79 @@
# Apache License, Version 2.0
import api
import os
def _run(args):
import bpy
import time
device_type = args['device_type']
device_index = args['device_index']
scene = bpy.context.scene
scene.render.engine = 'CYCLES'
scene.render.filepath = args['render_filepath']
scene.render.image_settings.file_format = 'PNG'
scene.cycles.device = 'CPU' if device_type == 'CPU' else 'GPU'
if scene.cycles.device == 'GPU':
# Enable specified GPU in preferences.
prefs = bpy.context.preferences
cprefs = prefs.addons['cycles'].preferences
cprefs.compute_device_type = device_type
devices = cprefs.get_devices_for_type(device_type)
for device in devices:
device.use = False
index = 0
for device in devices:
if device.type == device_type:
if index == device_index:
device.use = True
break
else:
index += 1
# Render
bpy.ops.render.render(write_still=True)
return None
class CyclesTest(api.Test):
def __init__(self, filepath):
self.filepath = filepath
def name(self):
return self.filepath.stem
def category(self):
return "cycles"
def use_device(self):
return True
def run(self, env, device_id):
tokens = device_id.split('_')
device_type = tokens[0]
device_index = int(tokens[1]) if len(tokens) > 1 else 0
args = {'device_type': device_type,
'device_index': device_index,
'render_filepath': str(env.log_file.parent / (env.log_file.stem + '.png'))}
_, lines = env.run_in_blender(_run, args, ['--debug-cycles', '--verbose', '1', self.filepath])
# Parse render time from output
prefix = "Render time (without synchronization): "
time = 0.0
for line in lines:
line = line.strip()
offset = line.find(prefix)
if offset != -1:
time = line[offset + len(prefix):]
return {'time': float(time)}
raise Exception("Error parsing render time output")
def generate(env):
filepaths = env.find_blend_files('cycles-x/*')
return [CyclesTest(filepath) for filepath in filepaths]