From a34c030e5ec7021e7fb452410d38abfb3993ec68 Mon Sep 17 00:00:00 2001
From: Brad Bishop
Date: Mon, 23 Sep 2019 22:34:48 -0400
Subject: poky: subtree update:745e38ff0f..81f9e815d3
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adrian Bunk (6):
      openssl: Upgrade 1.1.1c -> 1.1.1d
      glib-2.0: Upgrade 2.60.6 -> 2.60.7
      lttng-modules: Upgrade 2.10.10 -> 2.10.11
      lttng-ust: Upgrade 2.10.4 -> 2.10.5
      squashfs-tools: Remove UPSTREAM_CHECK_COMMITS
      libmpc: Remove dead UPSTREAM_CHECK_URI

Alexander Kanavin (2):
      runqemu: decouple gtk and gl options
      strace: add a timeout for running ptests

Alistair Francis (1):
      gdb: Mark gdbserver as ALLOW_EMPTY for riscv32

Andre McCurdy (9):
      busybox: drop unused mount.busybox and umount.busybox wrappers
      busybox: drop inittab from SRC_URI ( now moved to busybox-inittab )
      busybox-inittab: minor formatting tweaks
      base-files: drop legacy empty file /etc/default/usbd
      busybox: rcS and rcK should not be writeable by everyone
      ffmpeg: add PACKAGECONFIG controls for alsa and zlib (enable by default)
      libwebp: apply ARM specific config options to big endian ARM
      initscripts: enable alignment.sh init script for big endian ARM
      libunwind: apply configure over-ride to both big and little endian ARM

Andrew F. Davis (4):
      libepoxy: Disable x11 when not building for x11
      cogl: Set depends to the virtual needed not explicitly on Mesa
      gtk+3: Set depends to the virtual needed not explicitly on Mesa
      weston: Set depends to the virtual needed not explicitly on Mesa

Armin Kuster (1):
      gcc: Security fix for CVE-2019-15847

Changhyeok Bae (1):
      iw: upgrade to 5.3

Changqing Li (2):
      classextend.py: don't extend file for file dependency
      report-error.bbclass: add local.conf/auto.conf into error report

Chen Qi (1):
      python-numpy: fix build for libn32

Daniel Gomez (1):
      lttng-modules: Add missing SRCREV_FORMAT

Diego Rondini (1):
      initramfs-framework: support PARTLABEL option

Dmitry Eremin-Solenikov (7):
      image-uefi.conf: add config file holding configuration for UEFI images
      grub-bootconf: switch to image-uefi.conf
      grub-efi: switch to image-uefi.conf
      grub-efi.bbclass: switch to image-uefi.conf
      systemd-boot: switch to image-uefi.conf
      systemd-boot.bbclass: switch to image-uefi.conf
      live-vm-common.bbclass: provide efi population functions for live images

Hector Palacios (1):
      udev-extraconf: skip mounting partitions already mounted by systemd

Henning Schild (6):
      oe-git-proxy: allow setting SOCAT from outside
      oeqa: add case for oe-git-proxy
      Revert "oe-git-proxy: Avoid resolving NO_PROXY against local files"
      oe-git-proxy: disable shell pathname expansion for the whole script
      oe-git-proxy: NO_PROXY suffix matching without wildcard for match_host
      oe-git-proxy: fix dash "Bad substitution"

Hongxu Jia (1):
      elfutils: 0.176 -> 0.177

Jack Mitchell (1):
      iptables: add systemd helper unit to load/restore rules

Jaewon Lee (1):
      populate_sdk_ext: Introduce mechanism to keep nativesdk* sstate in esdk

Jason Wessel (1):
      gnupg: Extend -native wrapper to fix gpgme-native's gpgconf problems

Jiang Lu (2):
      glib-networking:enable glib-networking build as native package
      libsoup:enable libsoup build as native package

Joshua Watt (4):
      sstatesig: Update server URI
      Remove SSTATE_HASHEQUIV_SERVER
      bitbake: bitbake: Rework hash equivalence
      classes/archiver: Fix WORKDIR for shared source

Kai Kang (1):
      systemd: provides ${base_sbindir}/udevadm

Khem Raj (10):
      ptrace: Drop ptrace aid for musl/ppc
      elfutils: Fix build on ppc/musl
      cogl: Do not depend PN-dev on empty PN
      musl: Update to latest master
      glibc: Move DISTRO_FEATURE specific do_install code for target recipe only
      populate_sdk_base.bbclass: nativesdk-glibc-locale is required on musl too
      nativesdk.bbclass: Clear out LIBCEXTENSION and ABIEXTENSION
      openssl: Enable os option for with-rand-seed as well
      weston-init: Add possibility to run weston as non-root user
      layer.conf: Remove weston-conf from SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS

Li Zhou (1):
      qemu: Security Advisory - qemu - CVE-2019-15890

Limeng (1):
      tune-cortexa57-cortexa53: add tunes for ARM Cortex-A53-Cortex-A57

Martin Jansa (2):
      perf: fix build on kernels which don't have ${S}/tools/include/linux/bits.h
      bitbake: Revert "bitbake: cooker: Ensure bbappends are found in stable order"

Maxime Roussin-Bélanger (1):
      meta: add missing descriptions and homepage in bsp

Mikko Rapeli (2):
      busybox.inc: handle empty DEBUG_PREFIX_MAP
      bitbake: svn fetcher: allow "svn propget svn:externals" to fail

Nathan Rossi (7):
      resulttool: Handle multiple series containing ptestresults
      gcc-cross.inc: Process binaries in build dir to be relocatable
      oeqa/core/case.py: Add OEPTestResultTestCase for ptestresult helpers
      oeqa/selftest: Rework toolchain tests to use OEPTestResultTestCase
      glibc-testsuite: SkipRecipe if libc is not glibc
      cmake: 3.15.2 -> 3.15.3
      meson.bbclass: Handle microblaze* mapping to cpu family

Oleksandr Kravchuk (5):
      python3-pygobject: update to 3.34.0
      font-util: update to 1.3.2
      expat: update to 2.2.8
      curl: update to 7.66.0
      python3-dbus: update to 1.2.12

Otavio Salvador (1):
      mesa: Upgrade 19.1.1 -> 19.1.6

Peter Kjellerstedt (3):
      glibc: Make it build without ldconfig in DISTRO_FEATURES
      package_rpm.bbclass: Remove a misleading bb.note()
      tzdata: Correct the packaging of /etc/localtime and /etc/timezone

Quentin Schulz (1):
      externalsrc: stop rebuilds of 2+ externalsrc recipes sharing the same git repo

Randy MacLeod (4):
      valgrind: enable ~500 more ptests
      valgrind: make a few more ptests pass
      valgrind: ptest improvements to run-ptest and more
      valgrind: disable 256 ptests for aarch64

Richard Purdie (8):
      bitbake: runqueue/siggen: Optimise hash equiv queries
      runqemu: Mention snapshot in the help output
      initramfs-framework: support PARTLABEL option
      systemd: Handle slow to boot mips hwdb update timeouts
      meta-extsdk: Either an sstate task is a proper task or it isn't
      oeqa/concurrenttest: Use ionice to delete build directories
      bitbake: utils: Add ionice option to prunedir
      build-appliance-image: Update to master head revision

Robert Yang (2):
      conf/multilib.conf: Add ovmf to NON_MULTILIB_RECIPES
      bitbake: runqueue: validate_hashes(): currentcount should be a number

Ross Burton (16):
      libtasn1: fix build with api-documentation enabled
      gstreamer1.0-libav: enable gtk-doc again
      python3: handle STAGING_LIBDIR/INCDIR being unset
      mesa: no need to depend on target python3
      adwaita-icon-theme: fix rare install race
      oeqa/selftest/wic: improve assert messages in test_fixed_size
      oeqa/selftest/imagefeatures: dump the JSON if it can't be parsed
      libical: upgrade to 3.0.6
      acpica: upgrade 20190509 -> 20190816
      gdk-pixbuf: upgrade 2.38.1 -> 2.38.2
      piglit: upgrade to latest revision
      libinput: upgrade 1.14.0 -> 1.14.1
      rootfs-postcommands: check /etc/gconf exists before working on it
      systemd-systemctl-native: don't care about line endings
      opkg-utils: respect SOURCE_DATE_EPOCH when building ipkgs
      bitbake: fetch2/git: add git-lfs toggle option

Scott Murray (1):
      systemd: upgrade to 243

Stefan Ghinea (1):
      ghostscript: CVE-2019-14811, CVE-2019-14817

Tim Blechmann (1):
      icecc: blacklist pixman

Yeoh Ee Peng (3):
      bitbake: bitbake-layers: show-recipes: Show recipes only
      bitbake: bitbake-layers: show-recipes: Select recipes from selected layer
      bitbake: bitbake-layers: show-recipes: Enable bare output

Yi Zhao (3):
      screen: add /etc/screenrc as global config file
      nfs-utils: fix nfs mount error on 32bit nfs server
      grub: remove diffutils and freetype runtime dependencies

Zang Ruochen (2):
      btrfs-tools:upgrade 5.2.1 -> 5.2.2
      timezone:upgrade 2019b -> 2019c

Change-Id: I1ec24480a8964e474cd99d60a0cb0975e49b46b8
Signed-off-by: Brad Bishop
---
 poky/bitbake/lib/hashserv/server.py | 414 ++++++++++++++++++++++++++++++++++++
 1 file changed, 414 insertions(+)
 create mode 100644 poky/bitbake/lib/hashserv/server.py

diff --git a/poky/bitbake/lib/hashserv/server.py b/poky/bitbake/lib/hashserv/server.py
new file mode 100644
index 000000000..0aff77688
--- /dev/null
+++ b/poky/bitbake/lib/hashserv/server.py
@@ -0,0 +1,414 @@
+# Copyright (C) 2019 Garmin Ltd.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from contextlib import closing
+from datetime import datetime
+import asyncio
+import json
+import logging
+import math
+import os
+import signal
+import socket
+import time
+
+logger = logging.getLogger('hashserv.server')
+
+
+class Measurement(object):
+    def __init__(self, sample):
+        self.sample = sample
+
+    def start(self):
+        self.start_time = time.perf_counter()
+
+    def end(self):
+        self.sample.add(time.perf_counter() - self.start_time)
+
+    def __enter__(self):
+        self.start()
+        return self
+
+    def __exit__(self, *args, **kwargs):
+        self.end()
+
+
+class Sample(object):
+    def __init__(self, stats):
+        self.stats = stats
+        self.num_samples = 0
+        self.elapsed = 0
+
+    def measure(self):
+        return Measurement(self)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args, **kwargs):
+        self.end()
+
+    def add(self, elapsed):
+        self.num_samples += 1
+        self.elapsed += elapsed
+
+    def end(self):
+        if self.num_samples:
+            self.stats.add(self.elapsed)
+            self.num_samples = 0
+            self.elapsed = 0
+
+
+class Stats(object):
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        self.num = 0
+        self.total_time = 0
+        self.max_time = 0
+        self.m = 0
+        self.s = 0
+        self.current_elapsed = None
+
+    def add(self, elapsed):
+        # Welford's online algorithm: m is the running mean and s the running
+        # sum of squared differences from the mean, so average and stdev can
+        # be derived without storing the individual samples
+        self.num += 1
+        if self.num == 1:
+            self.m = elapsed
+            self.s = 0
+        else:
+            last_m = self.m
+            self.m = last_m + (elapsed - last_m) / self.num
+            self.s = self.s + (elapsed - last_m) * (elapsed - self.m)
+
+        self.total_time += elapsed
+
+        if self.max_time < elapsed:
+            self.max_time = elapsed
+
+    def start_sample(self):
+        return Sample(self)
+
+    @property
+    def average(self):
+        if self.num == 0:
+            return 0
+        return self.total_time / self.num
+
+    @property
+    def stdev(self):
+        if self.num <= 1:
+            return 0
+        return math.sqrt(self.s / (self.num - 1))
+
+    def todict(self):
+        return {k: getattr(self, k) for k in ('num', 'total_time', 'max_time', 'average', 'stdev')}
+
+
+class ServerClient(object):
+    def __init__(self, reader, writer, db, request_stats):
+        self.reader = reader
+        self.writer = writer
+        self.db = db
+        self.request_stats = request_stats
+
+    async def process_requests(self):
+        try:
+            self.addr = self.writer.get_extra_info('peername')
+            logger.debug('Client %r connected' % (self.addr,))
+
+            # Read protocol and version
+            protocol = await self.reader.readline()
+            if not protocol:
+                return
+
+            (proto_name, proto_version) = protocol.decode('utf-8').rstrip().split()
+            if proto_name != 'OEHASHEQUIV' or proto_version != '1.0':
+                return
+
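+            # After the handshake the session is line oriented: a header
+            # block terminated by an empty line, then one JSON object per
+            # line, keyed by the command name, e.g.
+            #
+            #   {"get": {"method": "...", "taskhash": "..."}}
+            #
+            # Every command except 'get-stream' gets a single JSON object
+            # (possibly null) in reply; 'get-stream' switches the connection
+            # to the raw line protocol implemented in handle_get_stream()
+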
+            # Read headers. Currently, no headers are implemented, so look for
+            # an empty line to signal the end of the headers
+            while True:
+                line = await self.reader.readline()
+                if not line:
+                    return
+
+                line = line.decode('utf-8').rstrip()
+                if not line:
+                    break
+
+            # Handle messages
+            handlers = {
+                'get': self.handle_get,
+                'report': self.handle_report,
+                'get-stream': self.handle_get_stream,
+                'get-stats': self.handle_get_stats,
+                'reset-stats': self.handle_reset_stats,
+            }
+
+            while True:
+                d = await self.read_message()
+                if d is None:
+                    break
+
+                for k in handlers.keys():
+                    if k in d:
+                        logger.debug('Handling %s' % k)
+                        if 'stream' in k:
+                            await handlers[k](d[k])
+                        else:
+                            with self.request_stats.start_sample() as self.request_sample, \
+                                    self.request_sample.measure():
+                                await handlers[k](d[k])
+                        break
+                else:
+                    logger.warning("Unrecognized command %r" % d)
+                    break
+
+            await self.writer.drain()
+        finally:
+            self.writer.close()
+
+    def write_message(self, msg):
+        self.writer.write(('%s\n' % json.dumps(msg)).encode('utf-8'))
+
+    async def read_message(self):
+        l = await self.reader.readline()
+        if not l:
+            return None
+
+        try:
+            message = l.decode('utf-8')
+
+            if not message.endswith('\n'):
+                return None
+
+            return json.loads(message)
+        except (json.JSONDecodeError, UnicodeDecodeError) as e:
+            logger.error('Bad message from client: %r' % l)
+            raise e
+
+    async def handle_get(self, request):
+        method = request['method']
+        taskhash = request['taskhash']
+
+        row = self.query_equivalent(method, taskhash)
+        if row is not None:
+            logger.debug('Found equivalent task %s -> %s', row['taskhash'], row['unihash'])
+            d = {k: row[k] for k in ('taskhash', 'method', 'unihash')}
+
+            self.write_message(d)
+        else:
+            self.write_message(None)
+
+    async def handle_get_stream(self, request):
+        self.write_message('ok')
+
+        while True:
+            l = await self.reader.readline()
+            if not l:
+                return
+
+            try:
+                # This inner loop is very sensitive and must be as fast as
+                # possible (which is why the request sample is handled manually
+                # instead of using 'with', and also why logging statements are
+                # commented out)
+                self.request_sample = self.request_stats.start_sample()
+                request_measure = self.request_sample.measure()
+                request_measure.start()
+
+                l = l.decode('utf-8').rstrip()
+                if l == 'END':
+                    self.writer.write('ok\n'.encode('utf-8'))
+                    return
+
+                (method, taskhash) = l.split()
+                #logger.debug('Looking up %s %s' % (method, taskhash))
+                row = self.query_equivalent(method, taskhash)
+                if row is not None:
+                    msg = ('%s\n' % row['unihash']).encode('utf-8')
+                    #logger.debug('Found equivalent task %s -> %s', row['taskhash'], row['unihash'])
+                else:
+                    msg = '\n'.encode('utf-8')
+
+                self.writer.write(msg)
+            finally:
+                request_measure.end()
+                self.request_sample.end()
+
+            await self.writer.drain()
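+
+    # For reference, the get-stream exchange above looks like this on the
+    # wire, once the client has sent a {"get-stream": ...} request and read
+    # the "ok" reply:
+    #
+    #   client: <method> <taskhash>
+    #   server: <unihash>              (a bare newline when there is no match)
+    #   client: END
+    #   server: ok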
+
+    async def handle_report(self, data):
+        with closing(self.db.cursor()) as cursor:
+            cursor.execute('''
+                -- Find tasks with a matching outhash (that is, tasks that
+                -- are equivalent)
+                SELECT taskhash, method, unihash FROM tasks_v2 WHERE method=:method AND outhash=:outhash
+
+                -- If there is an exact match on the taskhash, return it.
+                -- Otherwise return the oldest matching outhash of any
+                -- taskhash
+                ORDER BY CASE WHEN taskhash=:taskhash THEN 1 ELSE 2 END,
+                    created ASC
+
+                -- Only return one row
+                LIMIT 1
+                ''', {k: data[k] for k in ('method', 'outhash', 'taskhash')})
+
+            row = cursor.fetchone()
+
+            # If no matching outhash was found, or one *was* found but it
+            # wasn't an exact match on the taskhash, a new entry for this
+            # taskhash should be added
+            if row is None or row['taskhash'] != data['taskhash']:
+                # If a row matching the outhash was found, the unihash for
+                # the new taskhash should be the same as that one.
+                # Otherwise the caller provided unihash is used.
+                unihash = data['unihash']
+                if row is not None:
+                    unihash = row['unihash']
+
+                insert_data = {
+                    'method': data['method'],
+                    'outhash': data['outhash'],
+                    'taskhash': data['taskhash'],
+                    'unihash': unihash,
+                    'created': datetime.now()
+                }
+
+                for k in ('owner', 'PN', 'PV', 'PR', 'task', 'outhash_siginfo'):
+                    if k in data:
+                        insert_data[k] = data[k]
+
+                cursor.execute('''INSERT INTO tasks_v2 (%s) VALUES (%s)''' % (
+                    ', '.join(sorted(insert_data.keys())),
+                    ', '.join(':' + k for k in sorted(insert_data.keys()))),
+                    insert_data)
+
+                self.db.commit()
+
+                logger.info('Adding taskhash %s with unihash %s',
+                            data['taskhash'], unihash)
+
+                d = {
+                    'taskhash': data['taskhash'],
+                    'method': data['method'],
+                    'unihash': unihash
+                }
+            else:
+                d = {k: row[k] for k in ('taskhash', 'method', 'unihash')}
+
+        self.write_message(d)
+
+    async def handle_get_stats(self, request):
+        d = {
+            'requests': self.request_stats.todict(),
+        }
+
+        self.write_message(d)
+
+    async def handle_reset_stats(self, request):
+        d = {
+            'requests': self.request_stats.todict(),
+        }
+
+        self.request_stats.reset()
+        self.write_message(d)
+
+    def query_equivalent(self, method, taskhash):
+        # This is part of the inner loop and must be as fast as possible
+        try:
+            cursor = self.db.cursor()
+            cursor.execute('SELECT taskhash, method, unihash FROM tasks_v2 WHERE method=:method AND taskhash=:taskhash ORDER BY created ASC LIMIT 1',
+                           {'method': method, 'taskhash': taskhash})
+            return cursor.fetchone()
+        except:
+            cursor.close()
+
+
+class Server(object):
+    def __init__(self, db, loop=None):
+        self.request_stats = Stats()
+        self.db = db
+
+        if loop is None:
+            self.loop = asyncio.new_event_loop()
+            self.close_loop = True
+        else:
+            self.loop = loop
+            self.close_loop = False
+
+        self._cleanup_socket = None
+
+    def start_tcp_server(self, host, port):
+        self.server = self.loop.run_until_complete(
+            asyncio.start_server(self.handle_client, host, port, loop=self.loop)
+        )
+
+        for s in self.server.sockets:
+            logger.info('Listening on %r' % (s.getsockname(),))
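+            # Disable Nagle's algorithm and delayed ACKs: the protocol is
+            # many small request/response messages, so low per-message
+            # latency matters more than bulk throughput here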
+            # Newer python does this automatically. Do it manually here for
+            # maximum compatibility
+            s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
+            s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
+
+        name = self.server.sockets[0].getsockname()
+        if self.server.sockets[0].family == socket.AF_INET6:
+            self.address = "[%s]:%d" % (name[0], name[1])
+        else:
+            self.address = "%s:%d" % (name[0], name[1])
+
+    def start_unix_server(self, path):
+        def cleanup():
+            os.unlink(path)
+
+        cwd = os.getcwd()
+        try:
+            # Work around path length limits in AF_UNIX
+            os.chdir(os.path.dirname(path))
+            self.server = self.loop.run_until_complete(
+                asyncio.start_unix_server(self.handle_client, os.path.basename(path), loop=self.loop)
+            )
+        finally:
+            os.chdir(cwd)
+
+        logger.info('Listening on %r' % path)
+
+        self._cleanup_socket = cleanup
+        self.address = "unix://%s" % os.path.abspath(path)
+
+    async def handle_client(self, reader, writer):
+        # writer.transport.set_write_buffer_limits(0)
+        try:
+            client = ServerClient(reader, writer, self.db, self.request_stats)
+            await client.process_requests()
+        except Exception as e:
+            import traceback
+            logger.error('Error from client: %s' % str(e), exc_info=True)
+            traceback.print_exc()
+            writer.close()
+        logger.info('Client disconnected')
+
+    def serve_forever(self):
+        def signal_handler():
+            self.loop.stop()
+
+        self.loop.add_signal_handler(signal.SIGTERM, signal_handler)
+
+        try:
+            self.loop.run_forever()
+        except KeyboardInterrupt:
+            pass
+
+        self.server.close()
+        self.loop.run_until_complete(self.server.wait_closed())
+        logger.info('Server shutting down')
+
+        if self.close_loop:
+            self.loop.close()
+
+        if self._cleanup_socket is not None:
+            self._cleanup_socket()
--
cgit v1.2.3
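
For reference, the wire protocol implemented above can be exercised end to
end with a small standalone client. The sketch below is illustrative rather
than part of the patch: the address is hypothetical (use whatever
start_tcp_server() was actually given), and the method/taskhash values are
placeholders.

    import json
    import socket

    HOST, PORT = "localhost", 8686  # hypothetical; match start_tcp_server()

    with socket.create_connection((HOST, PORT)) as sock:
        f = sock.makefile("rw", encoding="utf-8")
        # Handshake: protocol name and version, then an empty header block
        f.write("OEHASHEQUIV 1.0\n\n")
        f.flush()
        # One JSON object per line, keyed by the command name
        f.write(json.dumps({"get": {"method": "example.bb.do_compile",
                                    "taskhash": "0" * 64}}) + "\n")
        f.flush()
        # The server replies with one JSON line: a task record, or null
        print(json.loads(f.readline()))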