#!/usr/bin/python3
#
# Build performance test script
#
# Copyright (c) 2016, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
"""Build performance test script"""
import argparse
import errno
import fcntl
import json
import logging
import os
import re
import shutil
import sys
from datetime import datetime

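# Make the helper modules shipped next to this script (under lib/) and the
# oe and bitbake python libraries importable before importing oeqa below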
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib')
import scriptpath
scriptpath.add_oe_lib_path()
scriptpath.add_bitbake_lib_path()
import oeqa.buildperf
from oeqa.buildperf import (BuildPerfTestLoader, BuildPerfTestResult,
                            BuildPerfTestRunner, KernelDropCaches)
from oeqa.utils.commands import runCmd
from oeqa.utils.metadata import metadata_from_bb, write_metadata_file


# Set-up logging
LOG_FORMAT = '[%(asctime)s] %(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT,
                    datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger()


def acquire_lock(lock_f):
    """Acquire flock on file"""
    log.debug("Acquiring lock %s", os.path.abspath(lock_f.name))
    try:
        fcntl.flock(lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as err:
        if err.errno == errno.EAGAIN:
            return False
        raise
    log.debug("Lock acquired")
    return True


def pre_run_sanity_check():
    """Sanity check of build environment"""
    build_dir = os.environ.get("BUILDDIR")
    if not build_dir:
        log.error("BUILDDIR not set. Please run the build environment setup "
                  "script.")
        return False
    if os.getcwd() != build_dir:
        log.error("Please run this script under BUILDDIR (%s)", build_dir)
        return False

    ret = runCmd('which bitbake', ignore_status=True)
    if ret.status:
        log.error("bitbake command not found")
        return False
    return True


def setup_file_logging(log_file):
    """Set up logging to a file"""
    log_dir = os.path.dirname(log_file)
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    formatter = logging.Formatter(LOG_FORMAT)
    handler = logging.FileHandler(log_file)
    handler.setFormatter(formatter)
    log.addHandler(handler)


def archive_build_conf(out_dir):
    """Archive build/conf to test results"""
    src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
    tgt_dir = os.path.join(out_dir, 'build', 'conf')
    os.makedirs(os.path.dirname(tgt_dir))
    shutil.copytree(src_dir, tgt_dir)


def update_globalres_file(result_obj, filename, metadata):
    """Write results to globalres csv file"""
    # Map test names to time and size columns in globalres
    # The tuples represent index and length of times and sizes
    # respectively
    gr_map = {'test1': ((0, 1), (8, 1)),
              'test12': ((1, 1), (None, None)),
              'test13': ((2, 1), (9, 1)),
              'test2': ((3, 1), (None, None)),
              'test3': ((4, 3), (None, None)),
              'test4': ((7, 1), (10, 2))}
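
    # Taken together the map covers 12 value columns: indices 0-7 hold
    # the time measurements and indices 8-11 the size measurements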
    values = ['0'] * 12
    for status, test, _ in result_obj.all_results():
        if status in ['ERROR', 'SKIPPED']:
            continue
        (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
        if t_ind is not None:
            values[t_ind:t_ind + t_len] = test.times
        if s_ind is not None:
            values[s_ind:s_ind + s_len] = test.sizes
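
    # Each appended row has the form
    #   <hostname>,<branch>:<commit>,<commit>,<value0>,...,<value11>
    # (note: the commit hash is written out to two columns)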
    log.debug("Writing globalres log to %s", filename)
    rev_info = metadata['layers']['meta']
    with open(filename, 'a') as fobj:
        fobj.write('{},{}:{},{},'.format(metadata['hostname'],
                                         rev_info['branch'],
                                         rev_info['commit'],
                                         rev_info['commit']))
        fobj.write(','.join(values) + '\n')


def parse_args(argv):
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('-D', '--debug', action='store_true',
                        help='Enable debug level logging')
    parser.add_argument('--globalres-file',
                        type=os.path.abspath,
                        help="Append results to 'globalres' csv file")
    parser.add_argument('--lock-file', default='./oe-build-perf.lock',
                        metavar='FILENAME', type=os.path.abspath,
                        help="Lock file to use")
    parser.add_argument('-o', '--out-dir', default='results-{date}',
                        type=os.path.abspath,
                        help="Output directory for test results")
    parser.add_argument('-x', '--xml', action='store_true',
                        help='Enable JUnit xml output')
    parser.add_argument('--log-file',
                        default='{out_dir}/oe-build-perf-test.log',
                        help="Log file of this script")
    parser.add_argument('--run-tests', nargs='+', metavar='TEST',
                        help="List of tests to run")

    return parser.parse_args(argv)
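

# Example invocation, assuming this script is installed as
# oe-build-perf-test and run from an initialized BUILDDIR
# (pre_run_sanity_check() enforces the latter):
#   oe-build-perf-test --out-dir /tmp/buildperf --globalres-file globalres.log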


def main(argv=None):
    """Script entry point"""
    args = parse_args(argv)

    # Set-up log file
    out_dir = args.out_dir.format(date=datetime.now().strftime('%Y%m%d%H%M%S'))
    setup_file_logging(args.log_file.format(out_dir=out_dir))

    if args.debug:
        log.setLevel(logging.DEBUG)
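
    # The lock file object is left open on purpose: the flock taken in
    # acquire_lock() stays held until this process exits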
    lock_f = open(args.lock_file, 'w')
    if not acquire_lock(lock_f):
        log.error("Another instance of this script is running, exiting...")
        return 1

    if not pre_run_sanity_check():
        return 1

    # Check our capability to drop caches and ask for the sudo password
    # if needed
    KernelDropCaches.check()

    # Load build perf tests
    loader = BuildPerfTestLoader()
    if args.run_tests:
        suite = loader.loadTestsFromNames(args.run_tests, oeqa.buildperf)
    else:
        suite = loader.loadTestsFromModule(oeqa.buildperf)

    # Save test metadata
    metadata = metadata_from_bb()
    log.info("Testing Git revision branch:commit %s:%s (%s)",
             metadata['layers']['meta']['branch'],
             metadata['layers']['meta']['commit'],
             metadata['layers']['meta']['commit_count'])
    if args.xml:
        write_metadata_file(os.path.join(out_dir, 'metadata.xml'), metadata)
    else:
        with open(os.path.join(out_dir, 'metadata.json'), 'w') as fobj:
            json.dump(metadata, fobj, indent=2)
    archive_build_conf(out_dir)

    runner = BuildPerfTestRunner(out_dir, verbosity=2)

    # Suppress logger output to stderr so that the output from unittest
    # is not mixed with occasional logger output
    log.handlers[0].setLevel(logging.CRITICAL)

    # Run actual tests
    result = runner.run(suite)

    # Restore logger output to stderr
    log.handlers[0].setLevel(log.level)

    if args.xml:
        result.write_results_xml()
    else:
        result.write_results_json()
    result.write_buildstats_json()
    if args.globalres_file:
        update_globalres_file(result, args.globalres_file, metadata)
    if result.wasSuccessful():
        return 0

    return 2


if __name__ == '__main__':
    sys.exit(main())