Skip to content

Commit b4fea58

Browse files
committed
Merge branch 'master' into ao-fix-issue-64
* master: (256 commits) fix chunk fetching network compatibility zombienet test (#6988) chore: delete repeat words (#7034) Print taplo version in CI (#7041) Implement cumulus StorageWeightReclaim as wrapping transaction extension + frame system ReclaimWeight (#6140) Make `TransactionExtension` tuple of tuple transparent for implication (#7028) Replace duplicated whitelist with whitelisted_storage_keys (#7024) [WIP] Fix networking-benchmarks (#7036) [docs] Fix release naming (#7032) migrate pallet-mixnet to umbrella crate (#6986) Improve remote externalities logging (#7021) Fix polkadot sdk doc. (#7022) Remove warning log from frame-omni-bencher CLI (#7020) [pallet-revive] fix file case (#6981) Add workflow for networking benchmarks (#7029) [CI] Skip SemVer on R0-silent and update docs (#6285) correct path in cumulus README (#7001) sync: Send already connected peers to new subscribers (#7011) Excluding chainlink domain for link checker CI (#6524) pallet-bounties: Fix benchmarks for 0 ED (#7013) Log peerset set ID -> protocol name mapping (#7005) ...
2 parents 9ea2099 + ffa90d0 commit b4fea58

File tree

1,895 files changed

+85765
-45465
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

1,895 files changed

+85765
-45465
lines changed

.cargo/config.toml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,3 +9,7 @@ rustdocflags = [
99
CC_x86_64_unknown_linux_musl = { value = ".cargo/musl-gcc", force = true, relative = true }
1010
CXX_x86_64_unknown_linux_musl = { value = ".cargo/musl-g++", force = true, relative = true }
1111
CARGO_WORKSPACE_ROOT_DIR = { value = "", relative = true }
12+
13+
[net]
14+
retry = 5
15+
# git-fetch-with-cli = true # commented because there is a risk that a runner can be banned by github

.config/lychee.toml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ exclude = [
2828
"http://visitme/",
2929
"https://visitme/",
3030

31-
# TODO <https://github.com/paritytech/polkadot-sdk/issues/134>
31+
# TODO meta issue: <https://github.com/paritytech/polkadot-sdk/issues/134>
3232
"https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs",
3333
"https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html",
3434
"https://github.com/ipfs/js-ipfs-bitswap/blob/",
@@ -50,8 +50,10 @@ exclude = [
5050
"https://w3f.github.io/parachain-implementers-guide/runtime/session_info.html",
5151

5252
# Behind a captcha (code 403):
53+
"https://chainlist.org/chain/*",
5354
"https://iohk.io/en/blog/posts/2023/11/03/partner-chains-are-coming-to-cardano/",
5455
"https://www.reddit.com/r/rust/comments/3spfh1/does_collect_allocate_more_than_once_while/",
56+
5557
# 403 rate limited:
5658
"https://etherscan.io/block/11090290",
5759
"https://subscan.io/",

.config/nextest.toml

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@ retries = 5
2121
# The number of threads to run tests with. Supported values are either an integer or
2222
# the string "num-cpus". Can be overridden through the `--test-threads` option.
2323
# test-threads = "num-cpus"
24-
2524
test-threads = 20
2625

2726
# The number of threads required for each test. This is generally used in overrides to
@@ -124,3 +123,10 @@ serial-integration = { max-threads = 1 }
124123
[[profile.default.overrides]]
125124
filter = 'test(/(^ui$|_ui|ui_)/)'
126125
test-group = 'serial-integration'
126+
127+
# Running eth-rpc tests sequentially
128+
# These tests rely on a shared resource (the RPC and Node)
129+
# and would cause race conditions due to transaction nonces if run in parallel.
130+
[[profile.default.overrides]]
131+
filter = 'package(pallet-revive-eth-rpc) and test(/^tests::/)'
132+
test-group = 'serial-integration'

.config/taplo.toml

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,3 +40,10 @@ keys = ["workspace.dependencies"]
4040

4141
[rule.formatting]
4242
reorder_keys = true
43+
44+
[[rule]]
45+
include = ["**/Cargo.toml"]
46+
keys = ["build-dependencies", "dependencies", "dev-dependencies"]
47+
48+
[rule.formatting]
49+
reorder_keys = true
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
name: "stop all workflows"
2+
description: "Action stops all workflows in a PR to save compute resources."
3+
inputs:
4+
app-id:
5+
description: "App id"
6+
required: true
7+
app-key:
8+
description: "App token"
9+
required: true
10+
runs:
11+
using: "composite"
12+
steps:
13+
- name: Workflow stopper - Generate token
14+
uses: actions/create-github-app-token@v1
15+
id: app-token
16+
with:
17+
app-id: ${{ inputs.app-id }}
18+
private-key: ${{ inputs.app-key }}
19+
owner: "paritytech"
20+
repositories: "workflow-stopper"
21+
- name: Workflow stopper - Stop all workflows
22+
uses: octokit/request-action@v2.x
23+
with:
24+
route: POST /repos/paritytech/workflow-stopper/actions/workflows/stopper.yml/dispatches
25+
ref: main
26+
inputs: '${{ format(''{{ "github_sha": "{0}", "github_repository": "{1}", "github_ref_name": "{2}", "github_workflow_id": "{3}", "github_job_name": "{4}" }}'', github.event.pull_request.head.sha, github.repository, github.ref_name, github.run_id, github.job) }}'
27+
env:
28+
GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}

.github/env

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034"
1+
IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558"
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
#!/bin/bash
2+
echo "Running script relative to `pwd`"
3+
# Find all README.docify.md files
4+
DOCIFY_FILES=$(find . -name "README.docify.md")
5+
6+
# Initialize a variable to track directories needing README regeneration
7+
NEED_REGENERATION=""
8+
9+
for file in $DOCIFY_FILES; do
10+
echo "Processing $file"
11+
12+
# Get the directory containing the docify file
13+
DIR=$(dirname "$file")
14+
15+
# Go to the directory and run cargo build
16+
cd "$DIR"
17+
cargo check --features generate-readme || { echo "Readme generation for $DIR failed. Ensure the crate compiles successfully and has a `generate-readme` feature which guards markdown compilation in the crate as follows: https://docs.rs/docify/latest/docify/macro.compile_markdown.html#conventions." && exit 1; }
18+
19+
# Check if README.md has any uncommitted changes
20+
git diff --exit-code README.md
21+
22+
if [ $? -ne 0 ]; then
23+
echo "Error: Found uncommitted changes in $DIR/README.md"
24+
NEED_REGENERATION="$NEED_REGENERATION $DIR"
25+
fi
26+
27+
# Return to the original directory
28+
cd - > /dev/null
29+
done
30+
31+
# Check if any directories need README regeneration
32+
if [ -n "$NEED_REGENERATION" ]; then
33+
echo "The following directories need README regeneration:"
34+
echo "$NEED_REGENERATION"
35+
exit 1
36+
fi

.github/scripts/cmd/cmd.py

Lines changed: 189 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ def setup_logging():
5858
%(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
5959
'''
6060

61-
parser_bench = subparsers.add_parser('bench', help='Runs benchmarks', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)
61+
parser_bench = subparsers.add_parser('bench', help='Runs benchmarks (old CLI)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)
6262

6363
for arg, config in common_args.items():
6464
parser_bench.add_argument(arg, **config)
@@ -67,6 +67,35 @@ def setup_logging():
6767
parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
6868
parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')
6969

70+
71+
"""
72+
BENCH OMNI
73+
"""
74+
75+
bench_example = '''**Examples**:
76+
Runs all benchmarks
77+
%(prog)s
78+
79+
Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it to output nothing to PR but reactions
80+
%(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet
81+
82+
Runs bench for all pallets for westend runtime and fails fast on first failed benchmark
83+
%(prog)s --runtime westend --fail-fast
84+
85+
Does not output anything and cleans up the previous bot's & author command triggering comments in PR
86+
%(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
87+
'''
88+
89+
parser_bench_old = subparsers.add_parser('bench-omni', help='Runs benchmarks (frame omni bencher)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)
90+
91+
for arg, config in common_args.items():
92+
parser_bench_old.add_argument(arg, **config)
93+
94+
parser_bench_old.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames)
95+
parser_bench_old.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
96+
parser_bench_old.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')
97+
98+
7099
"""
71100
FMT
72101
"""
@@ -98,12 +127,12 @@ def main():
98127

99128
print(f'args: {args}')
100129

101-
if args.command == 'bench':
130+
if args.command == 'bench-omni':
102131
runtime_pallets_map = {}
103132
failed_benchmarks = {}
104133
successful_benchmarks = {}
105134

106-
profile = "release"
135+
profile = "production"
107136

108137
print(f'Provided runtimes: {args.runtime}')
109138
# convert to mapped dict
@@ -113,11 +142,22 @@ def main():
113142

114143
# loop over remaining runtimes to collect available pallets
115144
for runtime in runtimesMatrix.values():
116-
os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}")
145+
build_command = f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}"
146+
print(f'-- building "{runtime["name"]}" with `{build_command}`')
147+
os.system(build_command)
117148
print(f'-- listing pallets for benchmark for {runtime["name"]}')
118149
wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
119-
output = os.popen(
120-
f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file} {runtime['bench_flags']}").read()
150+
list_command = f"frame-omni-bencher v1 benchmark pallet " \
151+
f"--no-csv-header " \
152+
f"--no-storage-info " \
153+
f"--no-min-squares " \
154+
f"--no-median-slopes " \
155+
f"--all " \
156+
f"--list " \
157+
f"--runtime={wasm_file} " \
158+
f"{runtime['bench_flags']}"
159+
print(f'-- running: {list_command}')
160+
output = os.popen(list_command).read()
121161
raw_pallets = output.strip().split('\n')
122162

123163
all_pallets = set()
@@ -230,6 +270,149 @@ def main():
230270
print_and_log('✅ Successful benchmarks of runtimes/pallets:')
231271
for runtime, pallets in successful_benchmarks.items():
232272
print_and_log(f'-- {runtime}: {pallets}')
273+
274+
if args.command == 'bench':
275+
runtime_pallets_map = {}
276+
failed_benchmarks = {}
277+
successful_benchmarks = {}
278+
279+
profile = "production"
280+
281+
print(f'Provided runtimes: {args.runtime}')
282+
# convert to mapped dict
283+
runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix))
284+
runtimesMatrix = {x['name']: x for x in runtimesMatrix}
285+
print(f'Filtered out runtimes: {runtimesMatrix}')
286+
287+
# loop over remaining runtimes to collect available pallets
288+
for runtime in runtimesMatrix.values():
289+
build_command = f"forklift cargo build -p {runtime['old_package']} --profile {profile} --features={runtime['bench_features']} --locked"
290+
print(f'-- building {runtime["name"]} with `{build_command}`')
291+
os.system(build_command)
292+
293+
chain = runtime['name'] if runtime['name'] == 'dev' else f"{runtime['name']}-dev"
294+
295+
machine_test = f"target/{profile}/{runtime['old_bin']} benchmark machine --chain={chain}"
296+
print(f"Running machine test for `{machine_test}`")
297+
os.system(machine_test)
298+
299+
print(f'-- listing pallets for benchmark for {chain}')
300+
list_command = f"target/{profile}/{runtime['old_bin']} " \
301+
f"benchmark pallet " \
302+
f"--no-csv-header " \
303+
f"--no-storage-info " \
304+
f"--no-min-squares " \
305+
f"--no-median-slopes " \
306+
f"--all " \
307+
f"--list " \
308+
f"--chain={chain}"
309+
print(f'-- running: {list_command}')
310+
output = os.popen(list_command).read()
311+
raw_pallets = output.strip().split('\n')
312+
313+
all_pallets = set()
314+
for pallet in raw_pallets:
315+
if pallet:
316+
all_pallets.add(pallet.split(',')[0].strip())
317+
318+
pallets = list(all_pallets)
319+
print(f'Pallets in {runtime["name"]}: {pallets}')
320+
runtime_pallets_map[runtime['name']] = pallets
321+
322+
print(f'\n')
323+
324+
# filter out only the specified pallets from collected runtimes/pallets
325+
if args.pallet:
326+
print(f'Pallets: {args.pallet}')
327+
new_pallets_map = {}
328+
# keep only specified pallets if they exist in the runtime
329+
for runtime in runtime_pallets_map:
330+
if set(args.pallet).issubset(set(runtime_pallets_map[runtime])):
331+
new_pallets_map[runtime] = args.pallet
332+
333+
runtime_pallets_map = new_pallets_map
334+
335+
print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n')
336+
337+
if not runtime_pallets_map:
338+
if args.pallet and not args.runtime:
339+
print(f"No pallets {args.pallet} found in any runtime")
340+
elif args.runtime and not args.pallet:
341+
print(f"{args.runtime} runtime does not have any pallets")
342+
elif args.runtime and args.pallet:
343+
print(f"No pallets {args.pallet} found in {args.runtime}")
344+
else:
345+
print('No runtimes found')
346+
sys.exit(1)
347+
348+
for runtime in runtime_pallets_map:
349+
for pallet in runtime_pallets_map[runtime]:
350+
config = runtimesMatrix[runtime]
351+
header_path = os.path.abspath(config['header'])
352+
template = None
353+
354+
chain = config['name'] if runtime == 'dev' else f"{config['name']}-dev"
355+
356+
print(f'-- config: {config}')
357+
if runtime == 'dev':
358+
# to support sub-modules (https://github.com/paritytech/command-bot/issues/275)
359+
search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
360+
print(f'-- running: {search_manifest_path}')
361+
manifest_path = os.popen(search_manifest_path).read()
362+
if not manifest_path:
363+
print(f'-- pallet {pallet} not found in dev runtime')
364+
if args.fail_fast:
365+
print_and_log(f'Error: {pallet} not found in dev runtime')
366+
sys.exit(1)
367+
package_dir = os.path.dirname(manifest_path)
368+
print(f'-- package_dir: {package_dir}')
369+
print(f'-- manifest_path: {manifest_path}')
370+
output_path = os.path.join(package_dir, "src", "weights.rs")
371+
template = config['template']
372+
else:
373+
default_path = f"./{config['path']}/src/weights"
374+
xcm_path = f"./{config['path']}/src/weights/xcm"
375+
output_path = default_path
376+
if pallet.startswith("pallet_xcm_benchmarks"):
377+
template = config['template']
378+
output_path = xcm_path
379+
380+
print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
381+
cmd = f"target/{profile}/{config['old_bin']} benchmark pallet " \
382+
f"--extrinsic=* " \
383+
f"--chain={chain} " \
384+
f"--pallet={pallet} " \
385+
f"--header={header_path} " \
386+
f"--output={output_path} " \
387+
f"--wasm-execution=compiled " \
388+
f"--steps=50 " \
389+
f"--repeat=20 " \
390+
f"--heap-pages=4096 " \
391+
f"{f'--template={template} ' if template else ''}" \
392+
f"--no-storage-info --no-min-squares --no-median-slopes "
393+
print(f'-- Running: {cmd} \n')
394+
status = os.system(cmd)
395+
396+
if status != 0 and args.fail_fast:
397+
print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}')
398+
sys.exit(1)
399+
400+
# Otherwise collect failed benchmarks and print them at the end
401+
# push failed pallets to failed_benchmarks
402+
if status != 0:
403+
failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
404+
else:
405+
successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]
406+
407+
if failed_benchmarks:
408+
print_and_log('❌ Failed benchmarks of runtimes/pallets:')
409+
for runtime, pallets in failed_benchmarks.items():
410+
print_and_log(f'-- {runtime}: {pallets}')
411+
412+
if successful_benchmarks:
413+
print_and_log('✅ Successful benchmarks of runtimes/pallets:')
414+
for runtime, pallets in successful_benchmarks.items():
415+
print_and_log(f'-- {runtime}: {pallets}')
233416

234417
elif args.command == 'fmt':
235418
command = f"cargo +nightly fmt"

0 commit comments

Comments
 (0)