Commit 2ffaa46

gossipd: handle overflowing query properly (avoid slow 100% CPU reports)
Don't do this:

    (gdb) bt
    #0  0x00007f37ae667c40 in ?? () from /lib/x86_64-linux-gnu/libz.so.1
    #1  0x00007f37ae668b38 in ?? () from /lib/x86_64-linux-gnu/libz.so.1
    #2  0x00007f37ae669907 in deflate () from /lib/x86_64-linux-gnu/libz.so.1
    #3  0x00007f37ae674c65 in compress2 () from /lib/x86_64-linux-gnu/libz.so.1
    #4  0x000000000040cfe3 in zencode_scids (ctx=0xc1f118, scids=0x2599bc49 "\a\325{", len=176320) at gossipd/gossipd.c:218
    #5  0x000000000040d0b3 in encode_short_channel_ids_end (encoded=0x7fff8f98d9f0, max_bytes=65490) at gossipd/gossipd.c:236
    #6  0x000000000040dd28 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290511, number_of_blocks=8) at gossipd/gossipd.c:576
    #7  0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290511, number_of_blocks=16) at gossipd/gossipd.c:595
    #8  0x000000000040ddee in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290495, number_of_blocks=32) at gossipd/gossipd.c:596
    #9  0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290495, number_of_blocks=64) at gossipd/gossipd.c:595
    #10 0x000000000040ddee in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290431, number_of_blocks=128) at gossipd/gossipd.c:596
    #11 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290431, number_of_blocks=256) at gossipd/gossipd.c:595
    #12 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290431, number_of_blocks=512) at gossipd/gossipd.c:595
    #13 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290431, number_of_blocks=1024) at gossipd/gossipd.c:595
    #14 0x000000000040ddee in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=2047) at gossipd/gossipd.c:596
    #15 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=4095) at gossipd/gossipd.c:595
    #16 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=8191) at gossipd/gossipd.c:595
    #17 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=16382) at gossipd/gossipd.c:595
    #18 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=32764) at gossipd/gossipd.c:595
    #19 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=65528) at gossipd/gossipd.c:595
    #20 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=131056) at gossipd/gossipd.c:595
    #21 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=262112) at gossipd/gossipd.c:595
    #22 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=524225) at gossipd/gossipd.c:595
    #23 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=1048450) at gossipd/gossipd.c:595
    #24 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=2096900) at gossipd/gossipd.c:595
    #25 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=4193801) at gossipd/gossipd.c:595
    #26 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=8387603) at gossipd/gossipd.c:595
    #27 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=16775207) at gossipd/gossipd.c:595
    #28 0x000000000040ddee in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=33550414) at gossipd/gossipd.c:596
    #29 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=67100829) at gossipd/gossipd.c:595
    #30 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=134201659) at gossipd/gossipd.c:595
    #31 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=268403318) at gossipd/gossipd.c:595
    #32 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=536806636) at gossipd/gossipd.c:595
    #33 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=1073613273) at gossipd/gossipd.c:595
    #34 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=2147226547) at gossipd/gossipd.c:595
    #35 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=4294453094) at gossipd/gossipd.c:595
    #36 0x000000000040df26 in handle_query_channel_range (peer=0x3868fc8, msg=0x37e0678 "\001\ao\342\214\n\266\361\263r\301\246\242F\256c\367O\223\036\203e\341Z\b\234h\326\031") at gossipd/gossipd.c:625

The cause was that converting a block number to an scid truncates it at
24 bits.  When we look through the index from (truncated number) to
(real end number) we get every channel, which is too large to encode,
so we iterate again.

This fixes both that problem, and also the issue that we'd end up
dividing into many empty sections until we get to the highest block
number.  Instead, we just tack the empty blocks on to the end of the
final query.

(My initial version requested 0xFFFFFFFE blocks, but the dev code which
records what blocks were returned can't make a bitmap that big on
32 bit).

Reported-by: George Vaccaro
Signed-off-by: Rusty Russell <[email protected]>
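
The 24-bit truncation is easiest to see in isolation. Below is a minimal standalone sketch (illustration only: mk_scid is a made-up helper that simply mirrors the BOLT 7 short_channel_id packing of 24-bit block height, 24-bit transaction index and 16-bit output index; it is not the project's own function):

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up helper mirroring the BOLT 7 short_channel_id layout:
     * 24-bit block height | 24-bit tx index | 16-bit output index. */
    static uint64_t mk_scid(uint32_t blocknum, uint32_t txnum, uint16_t outnum)
    {
            /* Only the low 24 bits of blocknum survive the packing. */
            return ((uint64_t)(blocknum & 0xFFFFFF) << 40)
                   | ((uint64_t)(txnum & 0xFFFFFF) << 16)
                   | outnum;
    }

    int main(void)
    {
            uint32_t blocknum = 17290511;   /* a block number from the backtrace */
            uint64_t scid = mk_scid(blocknum, 0, 0);

            /* Prints 513295: the scan start wraps back *below* the real channels
             * (block 514201 and up), so iterating "from here to the end" of the
             * channel index returns every known channel. */
            printf("asked for block %u, scid encodes block %u\n",
                   blocknum, (unsigned)(scid >> 40));
            return 0;
    }

With a wrapped starting point like this, every range above 2^24 blocks covers the whole channel index, the reply never fits in a 64k message, and the recursion keeps halving and re-compressing, which is the 100% CPU symptom in the backtrace.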
1 parent a4fe50c commit 2ffaa46

3 files changed: +50 -22 lines

CHANGELOG.md

Lines changed: 2 additions & 0 deletions

@@ -25,6 +25,8 @@ changes.
 
 ### Fixed
 
+- Protocol: handling `query_channel_range` for large numbers of blocks
+  (eg. 4 billion) was slow due to a bug.
 
 ### Security
 
gossipd/gossipd.c

Lines changed: 44 additions & 16 deletions

@@ -659,9 +659,14 @@ static void reply_channel_range(struct peer *peer,
 /*~ When we need to send an array of channels, it might go over our 64k packet
  * size. If it doesn't, we recurse, splitting in two, etc. Each message
  * indicates what blocks it contains, so the recipient knows when we're
- * finished. */
+ * finished.
+ *
+ * tail_blocks is the empty blocks at the end, in case they asked for all
+ * blocks to 4 billion.
+ */
 static void queue_channel_ranges(struct peer *peer,
-                                 u32 first_blocknum, u32 number_of_blocks)
+                                 u32 first_blocknum, u32 number_of_blocks,
+                                 u32 tail_blocks)
 {
         struct routing_state *rstate = peer->daemon->rstate;
         u8 *encoded = encode_short_channel_ids_start(tmpctx);
@@ -704,7 +709,8 @@ static void queue_channel_ranges(struct peer *peer,
 
         /* If we can encode that, fine: send it */
         if (encode_short_channel_ids_end(&encoded, max_encoded_bytes)) {
-                reply_channel_range(peer, first_blocknum, number_of_blocks,
+                reply_channel_range(peer, first_blocknum,
+                                    number_of_blocks + tail_blocks,
                                     encoded);
                 return;
         }
@@ -717,22 +723,26 @@ static void queue_channel_ranges(struct peer *peer,
                              first_blocknum);
                 return;
         }
-        status_debug("queue_channel_ranges full: splitting %u+%u and %u+%u",
+        status_debug("queue_channel_ranges full: splitting %u+%u and %u+%u(+%u)",
                      first_blocknum,
                      number_of_blocks / 2,
                      first_blocknum + number_of_blocks / 2,
-                     number_of_blocks - number_of_blocks / 2);
-        queue_channel_ranges(peer, first_blocknum, number_of_blocks / 2);
+                     number_of_blocks - number_of_blocks / 2,
+                     tail_blocks);
+        queue_channel_ranges(peer, first_blocknum, number_of_blocks / 2, 0);
         queue_channel_ranges(peer, first_blocknum + number_of_blocks / 2,
-                             number_of_blocks - number_of_blocks / 2);
+                             number_of_blocks - number_of_blocks / 2,
+                             tail_blocks);
 }
 
 /*~ The peer can ask for all channels is a series of blocks. We reply with one
  * or more messages containing the short_channel_ids. */
 static u8 *handle_query_channel_range(struct peer *peer, const u8 *msg)
 {
+        struct routing_state *rstate = peer->daemon->rstate;
         struct bitcoin_blkid chain_hash;
-        u32 first_blocknum, number_of_blocks;
+        u32 first_blocknum, number_of_blocks, tail_blocks;
+        struct short_channel_id last_scid;
 
         if (!fromwire_query_channel_range(msg, &chain_hash,
                                           &first_blocknum, &number_of_blocks)) {
@@ -751,14 +761,25 @@ static u8 *handle_query_channel_range(struct peer *peer, const u8 *msg)
                 return NULL;
         }
 
-        /* This checks for 32-bit overflow! */
-        if (first_blocknum + number_of_blocks < first_blocknum) {
-                return towire_errorfmt(peer, NULL,
-                                       "query_channel_range overflow %u+%u",
-                                       first_blocknum, number_of_blocks);
-        }
-
-        queue_channel_ranges(peer, first_blocknum, number_of_blocks);
+        /* If they ask for number_of_blocks UINTMAX, and we have to divide
+         * and conquer, we'll do a lot of unnecessary work.  Cap it at the
+         * last value we have, then send an empty reply. */
+        if (uintmap_last(&rstate->chanmap, &last_scid.u64)) {
+                u32 last_block = short_channel_id_blocknum(&last_scid);
+
+                /* u64 here avoids overflow on number_of_blocks
+                   UINTMAX for example */
+                if ((u64)first_blocknum + number_of_blocks > last_block) {
+                        tail_blocks = first_blocknum + number_of_blocks
+                                - last_block - 1;
+                        number_of_blocks -= tail_blocks;
+                } else
+                        tail_blocks = 0;
+        } else
+                tail_blocks = 0;
+
+        queue_channel_ranges(peer, first_blocknum, number_of_blocks,
+                             tail_blocks);
         return NULL;
 }
 
@@ -2291,6 +2312,13 @@ static struct io_plan *query_channel_range(struct io_conn *conn,
                 goto fail;
         }
 
+        /* Check for overflow on 32-bit machines! */
+        if (BITMAP_NWORDS(number_of_blocks) < number_of_blocks / BITMAP_WORD_BITS) {
+                status_broken("query_channel_range: huge number_of_blocks (%u) not supported",
+                              number_of_blocks);
+                goto fail;
+        }
+
         status_debug("sending query_channel_range for blocks %u+%u",
                      first_blocknum, number_of_blocks);
         msg = towire_query_channel_range(NULL, &daemon->chain_hash,
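
To make the new capping logic concrete, here is a small standalone sketch of the same arithmetic using the query from the backtrace; the value of last_block (556000) is an assumed stand-in for the highest block containing a known channel, not a number from the commit:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t first_blocknum = 514201;          /* from the backtrace */
            uint32_t number_of_blocks = 4294453094u;   /* from the backtrace */
            uint32_t last_block = 556000;              /* assumed highest known block */
            uint32_t tail_blocks;

            /* Same shape as the new handle_query_channel_range() code: the
             * u64 cast keeps the comparison from overflowing at 32 bits. */
            if ((uint64_t)first_blocknum + number_of_blocks > last_block) {
                    tail_blocks = first_blocknum + number_of_blocks
                            - last_block - 1;
                    number_of_blocks -= tail_blocks;
            } else
                    tail_blocks = 0;

            /* Prints "search 514201+41800, tail 4294411294": only the blocks
             * up to last_block are actually searched, and the empty tail is
             * added to the block count of the final reply via tail_blocks. */
            printf("search %u+%u, tail %u\n",
                   (unsigned)first_blocknum, (unsigned)number_of_blocks,
                   (unsigned)tail_blocks);
            return 0;
    }

The backtrace shows what happened without the cap: the divide-and-conquer halves a roughly 4-billion-block range about thirty times before any piece is small enough to encode, and almost all of those pieces are empty anyway.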

tests/test_gossip.py

Lines changed: 4 additions & 6 deletions

@@ -4,7 +4,6 @@
 import json
 import logging
 import os
-import pytest
 import struct
 import subprocess
 import time
@@ -548,7 +547,6 @@ def check_gossip(n):
 
 
 @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
-@pytest.mark.xfail(strict=True)
 def test_gossip_query_channel_range(node_factory, bitcoind):
     l1, l2, l3, l4 = node_factory.line_graph(4, opts={'log-level': 'io'},
                                              fundchannel=False)
@@ -662,8 +660,8 @@ def test_gossip_query_channel_range(node_factory, bitcoind):
                                          first=0,
                                          num=1000000)
 
-    # Turns out it sends huge number of empty replies here.
-    l1.daemon.wait_for_logs([r'\[IN\] 0108'] * 21)
+    # Turns out it sends: 0+53, 53+26, 79+13, 92+7, 99+3, 102+2, 104+1, 105+999895
+    l1.daemon.wait_for_logs([r'\[IN\] 0108'] * 8)
 
     # It should definitely have split
     assert ret['final_first_block'] != 0 or ret['final_num_blocks'] != 1000000
@@ -673,11 +671,11 @@ def test_gossip_query_channel_range(node_factory, bitcoind):
     assert ret['short_channel_ids'][1] == scid23
     l2.daemon.wait_for_log('queue_channel_ranges full: splitting')
 
-    # Test overflow case doesn't split forever; should only get 32 for this.
+    # Test overflow case doesn't split forever; should still only get 8 for this
     ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                          first=1,
                                          num=429496000)
-    l1.daemon.wait_for_logs([r'\[IN\] 0108'] * 32)
+    l1.daemon.wait_for_logs([r'\[IN\] 0108'] * 8)
 
     # And no more!
     time.sleep(1)
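
As a sanity check on the new log expectation in the test: the eight reply ranges listed in the comment tile the 0+1000000 query exactly. Each reply starts where the previous one ends (0, 53, 79, 92, 99, 102, 104, 105), and the block counts sum to 53 + 26 + 13 + 7 + 3 + 2 + 1 + 999895 = 1,000,000, with all of the empty blocks past the last known channel folded into the final reply, as the commit message describes.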
