[v19.x] deps: V8: cherry-pick f09dde9fbb54 #47239

Closed
deps: V8: cherry-pick f09dde9fbb54
Original commit message:

    Revert "[resultdb] Add ResultDB indicator"

    This reverts commit 237de893e1c0a0628a57d0f5797483d3add7f005.

    Reason for revert: breaks flake bisect:
    https://ci.chromium.org/ui/p/v8/builders/try.triggered/v8_flako/b8800423657665797553/overview

    The change added an implicit requirement to run the tests with
    vpython3, which is not available everywhere.

    Original change's description:
    > [resultdb] Add ResultDB indicator
    >
    > Adds a new indicator that will send every result to ResultDB (and ultimately in a bq table; to be configured later).
    >
    > If we are not running in a ResultDB context we introduce only a minimal overhead by exiting early from indicator.
    >
    > To test these changes in a luci context with ResultDB we activated resultdb feature flag via V8-Recipe-Flags. This feature got implemented in https://crrev.com/c/3925576 .
    >
    >
    > V8-Recipe-Flags: resultdb
    > Bug: v8:13316
    > Change-Id: I5d98e8f27531b536686a8d63b993313b9d6f62c5
    > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3905385
    > Commit-Queue: Liviu Rau <[email protected]>
    > Reviewed-by: Alexander Schulze <[email protected]>
    > Cr-Commit-Position: refs/heads/main@{#83672}

    Bug: v8:13316
    Change-Id: I7e55668e365475298ed46d2fc8ee0fe1282c3e8e
    No-Presubmit: true
    No-Tree-Checks: true
    No-Try: true
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3952131
    Auto-Submit: Michael Achenbach <[email protected]>
    Commit-Queue: Rubber Stamper <[email protected]>
    Bot-Commit: Rubber Stamper <[email protected]>
    Cr-Commit-Position: refs/heads/main@{#83677}

Refs: v8/v8@f09dde9
richardlau committed Apr 11, 2023
commit 48fc442c3ae799f76a1e7f548504da04db2610f3
2 changes: 1 addition & 1 deletion common.gypi
@@ -36,7 +36,7 @@

# Reset this number to 0 on major V8 upgrades.
# Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.16',
+    'v8_embedder_string': '-node.17',

##### V8 defaults for Node.js #####

5 changes: 0 additions & 5 deletions deps/v8/.vpython3
@@ -74,8 +74,3 @@ wheel: <
name: "infra/python/wheels/protobuf-py3"
version: "version:3.19.3"
>
-
-wheel: <
-  name: "infra/python/wheels/requests-py2_py3"
-  version: "version:2.13.0"
->
6 changes: 1 addition & 5 deletions deps/v8/tools/testrunner/objects/testcase.py
@@ -447,12 +447,8 @@ def cmp(x, y):
(other.suite.name, other.name, other.variant)
)

-  @property
-  def full_name(self):
-    return self.suite.name + '/' + self.name
-
  def __str__(self):
-    return self.full_name
+    return self.suite.name + '/' + self.name


class D8TestCase(TestCase):
82 changes: 44 additions & 38 deletions deps/v8/tools/testrunner/testproc/indicators.py
@@ -14,7 +14,7 @@


def print_failure_header(test, is_flaky=False):
-  text = [test.full_name]
+  text = [str(test)]
if test.output_proc.negative:
text.append('[negative]')
if is_flaky:
@@ -24,23 +24,6 @@ def print_failure_header(test, is_flaky=False):
print(output.encode(encoding, errors='replace').decode(encoding))


-def formatted_result_output(result):
-  lines = []
-  if result.output.stderr:
-    lines.append("--- stderr ---")
-    lines.append(result.output.stderr.strip())
-  if result.output.stdout:
-    lines.append("--- stdout ---")
-    lines.append(result.output.stdout.strip())
-  lines.append("Command: %s" % result.cmd.to_string())
-  if result.output.HasCrashed():
-    lines.append("exit code: %s" % result.output.exit_code_string)
-    lines.append("--- CRASHED ---")
-  if result.output.HasTimedOut():
-    lines.append("--- TIMEOUT ---")
-  return '\n'.join(lines)
-
-
class ProgressIndicator():

def __init__(self, context, options, test_count):
@@ -85,7 +68,19 @@ def finished(self):
for test, result, is_flaky in self._failed:
flaky += int(is_flaky)
print_failure_header(test, is_flaky=is_flaky)
-      print(formatted_result_output(result))
+      if result.output.stderr:
+        print("--- stderr ---")
+        print(result.output.stderr.strip())
+      if result.output.stdout:
+        print("--- stdout ---")
+        print(result.output.stdout.strip())
+      print("Command: %s" % result.cmd.to_string())
+      if result.output.HasCrashed():
+        print("exit code: %s" % result.output.exit_code_string)
+        print("--- CRASHED ---")
+        crashed += 1
+      if result.output.HasTimedOut():
+        print("--- TIMEOUT ---")
if len(self._failed) == 0:
print("===")
print("=== All tests succeeded")
@@ -235,7 +230,7 @@ def on_test_result(self, test, result):
else:
self._passed += 1

-    self._print_progress(test.full_name)
+    self._print_progress(str(test))
if result.has_unexpected_output:
output = result.output
stdout = output.stdout.strip()
@@ -363,7 +358,10 @@ def __init__(self, context, options, test_count, framework_name):
self.test_count = 0

def on_test_result(self, test, result):
-    self.process_results(test, result.as_list)
+    if result.is_rerun:
+      self.process_results(test, result.results)
+    else:
+      self.process_results(test, [result])

def process_results(self, test, results):
for run, result in enumerate(results):
@@ -378,7 +376,7 @@ def process_results(self, test, results):
if not result.has_unexpected_output and run == 0:
continue

-      record = self._test_record(test, result, run)
+      record = self._test_record(test, result, output, run)
record.update({
"result": test.output_proc.get_outcome(output),
"stdout": output.stdout,
@@ -394,33 +392,41 @@ def result_value(test, result, output):
return ""
return test.output_proc.get_outcome(output)

-      record = self._test_record(test, result, run)
-      record.update(
-          result=result_value(test, result, output),
-          marked_slow=test.is_slow,
-      )
+      record = self._test_record(test, result, output, run)
+      record.update({
+          "result": result_value(test, result, output),
+          "marked_slow": test.is_slow,
+      })
self.tests.add(record)
self.duration_sum += record['duration']
self.test_count += 1

-  def _test_record(self, test, result, run):
-    record = util.base_test_record(test, result, run)
-    record.update(
-        framework_name=self.framework_name,
-        command=result.cmd.to_string(relative=True),
-    )
-    return record
+  def _test_record(self, test, result, output, run):
+    return {
+        "name": str(test),
+        "flags": result.cmd.args,
+        "command": result.cmd.to_string(relative=True),
+        "run": run + 1,
+        "exit_code": output.exit_code,
+        "expected": test.expected_outcomes,
+        "duration": output.duration,
+        "random_seed": test.random_seed,
+        "target_name": test.get_shell(),
+        "variant": test.variant,
+        "variant_flags": test.variant_flags,
+        "framework_name": self.framework_name,
+    }

def finished(self):
duration_mean = None
if self.test_count:
duration_mean = self.duration_sum / self.test_count

result = {
-        'results': self.results,
-        'slowest_tests': self.tests.as_list(),
-        'duration_mean': duration_mean,
-        'test_total': self.test_count,
+        "results": self.results,
+        "slowest_tests": self.tests.as_list(),
+        "duration_mean": duration_mean,
+        "test_total": self.test_count,
}

with open(self.options.json_test_results, "w") as f:
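Taken together, the _test_record() and finished() hunks above define the layout of the JSON results file this indicator writes. As a hedged sketch only (the keys come from the diff above; every value is invented for illustration), one written file might look roughly like this:

import json

# Illustrative only: approximate shape of options.json_test_results after this
# change. Keys mirror _test_record()/finished() in the diff above.
example_json_results = {
    "results": [{
        "name": "mjsunit/example",        # str(test), i.e. "<suite>/<name>"
        "flags": ["--flag"],
        "command": "d8 --flag example.js",
        "run": 1,
        "exit_code": 1,
        "expected": ["PASS"],
        "duration": 0.25,
        "random_seed": 123,
        "target_name": "d8",
        "variant": "default",
        "variant_flags": [],
        "framework_name": "standard_runner",
        "result": "FAIL",
        "stdout": "",
        "stderr": "",
    }],
    "slowest_tests": [],      # same record shape plus "marked_slow", kept in a FixedSizeTopList
    "duration_mean": 0.25,
    "test_total": 1,
}

print(json.dumps(example_json_results, indent=2))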
3 changes: 1 addition & 2 deletions deps/v8/tools/testrunner/testproc/progress.py
@@ -6,7 +6,6 @@
from . import base
from testrunner.local import utils
from testrunner.testproc.indicators import JsonTestProgressIndicator, PROGRESS_INDICATORS
-from testrunner.testproc.resultdb import ResultDBIndicator


class ResultsTracker(base.TestProcObserver):
@@ -67,7 +66,7 @@ def __init__(self, context, options, framework_name, test_count):
0,
JsonTestProgressIndicator(context, options, test_count,
framework_name))
-    self.procs.append(ResultDBIndicator(context, options, test_count))
+
self._requirement = max(proc._requirement for proc in self.procs)

def _on_result_for(self, test, result):
8 changes: 0 additions & 8 deletions deps/v8/tools/testrunner/testproc/result.py
@@ -16,10 +16,6 @@ def is_grouped(self):
def is_rerun(self):
return False

-  @property
-  def as_list(self):
-    return [self]
-

class Result(ResultBase):
"""Result created by the output processor."""
@@ -116,9 +112,5 @@ def __init__(self, results):
def is_rerun(self):
return True

-  @property
-  def as_list(self):
-    return self.results
-
def status(self):
return ' '.join(r.status() for r in self.results)
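The as_list helper removed above was inlined at its only call site (the on_test_result hunk in indicators.py earlier in this diff). As a minimal sketch of the equivalent post-revert pattern, assuming only the is_rerun and results attributes shown in this file:

def results_of(result):
  # Post-revert equivalent of the removed ResultBase.as_list property:
  # a grouped rerun result exposes .results; a single result is wrapped
  # in a one-element list.
  return result.results if result.is_rerun else [result]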
95 changes: 0 additions & 95 deletions deps/v8/tools/testrunner/testproc/resultdb.py

This file was deleted.
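The 95 deleted lines of resultdb.py are not rendered in this view. Purely as a hedged illustration of the behaviour the commit message describes (forward every result, but exit early outside a ResultDB context), and not the actual deleted code, such an indicator might be shaped roughly like this; the class name, the environment check, and the method names below are all assumptions:

import os


class ResultDBIndicatorSketch:
  """Illustrative stand-in for the deleted ResultDBIndicator (not V8 code).

  Per the commit message, the real indicator exited early when not running
  in a ResultDB context, so results are only forwarded inside a LUCI/ResultDB
  session (the environment-variable check here is an assumption).
  """

  def __init__(self):
    # Assumption: a LUCI context in the environment signals a ResultDB session.
    self.enabled = 'LUCI_CONTEXT' in os.environ

  def on_test_result(self, test, result):
    if not self.enabled:
      return  # minimal overhead outside a ResultDB context
    self.send_to_resultdb(test, result)

  def send_to_resultdb(self, test, result):
    # Placeholder: the real implementation uploaded each result to the
    # ResultDB sink, which is why the requests wheel was listed in
    # .vpython3 above.
    pass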

38 changes: 0 additions & 38 deletions deps/v8/tools/testrunner/testproc/util.py
@@ -7,7 +7,6 @@
import logging
import os
import platform
-import re
import signal
import subprocess

@@ -54,43 +53,6 @@ def kill_processes_linux():
logging.exception('Failed to kill process')


-def strip_ascii_control_characters(unicode_string):
-  return re.sub(r'[^\x20-\x7E]', '?', str(unicode_string))
-
-
-def base_test_record(test, result, run):
-  record = {
-      'name': test.full_name,
-      'flags': result.cmd.args,
-      'run': run + 1,
-      'expected': test.expected_outcomes,
-      'random_seed': test.random_seed,
-      'target_name': test.get_shell(),
-      'variant': test.variant,
-      'variant_flags': test.variant_flags,
-  }
-  if result.output:
-    record.update(
-        exit_code=result.output.exit_code,
-        duration=result.output.duration,
-    )
-  return record
-
-
-def extract_tags(record):
-  tags = []
-  for k, v in record.items():
-    if type(v) == list:
-      tags += [sanitized_kv_dict(k, e) for e in v]
-    else:
-      tags.append(sanitized_kv_dict(k, v))
-  return tags
-
-
-def sanitized_kv_dict(k, v):
-  return dict(key=k, value=strip_ascii_control_characters(v))
-
-
class FixedSizeTopList():
"""Utility collection for gathering a fixed number of elements with the
biggest value for the given key. It employs a heap from which we pop the