connectd: don't disconnect automatically on sending warning or error.

This changes some tests which expected us to disconnect.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
commit 08f0a54fdc (parent 798cf27cb4)
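In short: where these tests previously waited for the peer list to empty and reconnected before retrying, they now simply assert the peer is still connected. A rough sketch of the new expectation, written against the pyln-testing fixtures these tests already use (the test name, the min-capacity-sat value and the amounts below are illustrative, not taken from this commit):

import pytest
from pyln.client import RpcError
from utils import only_one


def test_open_failure_keeps_connection(node_factory):
    # l2 demands a larger minimum channel capacity, so the first open fails.
    l1 = node_factory.get_node()
    l2 = node_factory.get_node(options={'min-capacity-sat': 20000})
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # l2 refuses the too-small channel with an error or warning...
    with pytest.raises(RpcError, match=r'They sent (ERROR|WARNING).*below'):
        l1.fundchannel(l2, 15000)

    # ...but connectd no longer hangs up after sending it, so the old
    # wait_for(peers == []) plus reconnect dance is unnecessary.
    assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']

    # Retrying with an acceptable amount works on the same connection.
    l1.fundchannel(l2, 30000)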
@@ -407,12 +407,6 @@ static bool is_urgent(enum peer_wire type)
	return false;
}

/* io_sock_shutdown, but in format suitable for an io_plan callback */
static struct io_plan *io_sock_shutdown_cb(struct io_conn *conn, struct peer *unused)
{
	return io_sock_shutdown(conn);
}

static struct io_plan *encrypt_and_send(struct peer *peer,
					const u8 *msg TAKES,
					struct io_plan *(*next)

@@ -447,17 +441,6 @@ static struct io_plan *encrypt_and_send(struct peer *peer,

	set_urgent_flag(peer, is_urgent(type));

	/* We are no longer required to do this, but we do disconnect
	 * after sending an error or warning. */
	if (type == WIRE_ERROR || type == WIRE_WARNING) {
		/* Might already be draining... */
		if (!peer->draining)
			drain_peer(peer);

		/* Close as soon as we've sent this. */
		next = io_sock_shutdown_cb;
	}

	/* We free this and the encrypted version in next write_to_peer */
	peer->sent_to_peer = cryptomsg_encrypt_msg(peer, &peer->cs, msg);
	return io_write(peer->to_peer,
@@ -411,29 +411,18 @@ def test_opening_tiny_channel(node_factory, anchors):

    with pytest.raises(RpcError, match=r'They sent (ERROR|WARNING).*channel capacity is .*, which is below .*sat'):
        l1.fundchannel(l2, l2_min_capacity + overhead - 1)
    if EXPERIMENTAL_DUAL_FUND:
        assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']
    else:
        wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']

    l1.fundchannel(l2, l2_min_capacity + overhead)

    with pytest.raises(RpcError, match=r'They sent (ERROR|WARNING).*channel capacity is .*, which is below .*sat'):
        l1.fundchannel(l3, l3_min_capacity + overhead - 1)
    if EXPERIMENTAL_DUAL_FUND:
        assert only_one(l1.rpc.listpeers(l3.info['id'])['peers'])['connected']
    else:
        wait_for(lambda: l1.rpc.listpeers(l3.info['id'])['peers'] == [])
        l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    assert only_one(l1.rpc.listpeers(l3.info['id'])['peers'])['connected']
    l1.fundchannel(l3, l3_min_capacity + overhead)

    with pytest.raises(RpcError, match=r'They sent (ERROR|WARNING).*channel capacity is .*, which is below .*sat'):
        l1.fundchannel(l4, l4_min_capacity + overhead - 1)
    if EXPERIMENTAL_DUAL_FUND:
        assert only_one(l1.rpc.listpeers(l4.info['id'])['peers'])['connected']
    else:
        wait_for(lambda: l1.rpc.listpeers(l4.info['id'])['peers'] == [])
        l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
    assert only_one(l1.rpc.listpeers(l4.info['id'])['peers'])['connected']
    l1.fundchannel(l4, l4_min_capacity + overhead)

    # Note that this check applies locally too, so you can't open it if

@@ -442,11 +431,7 @@ def test_opening_tiny_channel(node_factory, anchors):
    with pytest.raises(RpcError, match=r"channel capacity is .*, which is below .*sat"):
        l3.fundchannel(l2, l3_min_capacity + overhead - 1)

    if EXPERIMENTAL_DUAL_FUND:
        assert only_one(l3.rpc.listpeers(l2.info['id'])['peers'])['connected']
    else:
        wait_for(lambda: l3.rpc.listpeers(l2.info['id'])['peers'] == [])
        l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
    assert only_one(l3.rpc.listpeers(l2.info['id'])['peers'])['connected']
    l3.fundchannel(l2, l3_min_capacity + overhead)

@@ -1134,10 +1119,9 @@ def test_funding_fail(node_factory, bitcoind):
    with pytest.raises(RpcError, match=r'to_self_delay \d+ larger than \d+'):
        l1.rpc.fundchannel(l2.info['id'], int(funds / 10))

    # channels disconnect on failure (v1)
    if not EXPERIMENTAL_DUAL_FUND:
        wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 0)
        wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
    # channels do not disconnect on failure
    only_one(l1.rpc.listpeers()['peers'])
    only_one(l2.rpc.listpeers()['peers'])

    # Restart l2 without ridiculous locktime.
    del l2.daemon.opts['watchtime-blocks']

@@ -1349,9 +1333,8 @@ def test_funding_external_wallet_corners(node_factory, bitcoind):

    l1.rpc.fundchannel_cancel(l2.info['id'])

    # Cancelling causes disconnection.
    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # Cancelling does NOT cause disconnection.
    only_one(l1.rpc.listpeers(l2.info['id'])['peers'])
    amount2 = 1000000
    funding_addr = l1.rpc.fundchannel_start(l2.info['id'], amount2)['funding_address']

@@ -1372,8 +1355,8 @@ def test_funding_external_wallet_corners(node_factory, bitcoind):
    # But must unreserve inputs manually.
    l1.rpc.txdiscard(prep['txid'])

    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # Does not disconnect!
    only_one(l1.rpc.listpeers(l2.info['id'])['peers'])
    funding_addr = l1.rpc.fundchannel_start(l2.info['id'], amount)['funding_address']
    prep = l1.rpc.txprepare([{funding_addr: amount}])

@@ -2047,11 +2030,8 @@ def test_multifunding_wumbo(node_factory):
    with pytest.raises(RpcError, match='Amount exceeded'):
        l1.rpc.multifundchannel(destinations)

    # Make sure it's disconnected from l2 before retrying.
    if not EXPERIMENTAL_DUAL_FUND:
        wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
    else:
        assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']
    # Open failure doesn't cause disconnect
    assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']

    # This should succeed.
    destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),

@@ -759,11 +759,7 @@ def test_openchannel_hook_chaining(node_factory, bitcoind):
    # the third plugin must now not be called anymore
    assert not l2.daemon.is_in_log("reject on principle")

    if not EXPERIMENTAL_DUAL_FUND:
        wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    else:
        assert only_one(l1.rpc.listpeers()['peers'])['connected']
    assert only_one(l1.rpc.listpeers()['peers'])['connected']
    # 100000sat is good for hook_accepter, so it should fail 'on principle'
    # at third hook openchannel_reject.py
    with pytest.raises(RpcError, match=r'reject on principle'):