diff --git a/.travis/build.sh b/.travis/build.sh index 63f67c42f..61465f2a0 100755 --- a/.travis/build.sh +++ b/.travis/build.sh @@ -9,7 +9,6 @@ export EXPERIMENTAL_FEATURES=${EXPERIMENTAL_FEATURES:-0} export SOURCE_CHECK_ONLY=${SOURCE_CHECK_ONLY:-"false"} export COMPAT=${COMPAT:-1} export PATH=$CWD/dependencies/bin:"$HOME"/.local/bin:"$PATH" -export TIMEOUT=180 export PYTEST_PAR=2 export PYTHONPATH=$PWD/contrib/pylightning:$PYTHONPATH # If we're not in developer mode, tests spend a lot of time waiting for gossip! diff --git a/channeld/channel_wire.csv b/channeld/channel_wire.csv index 79242db83..51aaba116 100644 --- a/channeld/channel_wire.csv +++ b/channeld/channel_wire.csv @@ -186,7 +186,8 @@ msgdata,channel_dev_memleak_reply,leak,bool, # Peer presented proof it was from the future. msgtype,channel_fail_fallen_behind,1028 -msgdata,channel_fail_fallen_behind,remote_per_commitment_point,pubkey, +# This is NULL if option_static_remotekey. +msgdata,channel_fail_fallen_behind,remote_per_commitment_point,?pubkey, # Handle a channel specific feerate base ppm configuration msgtype,channel_specific_feerates,1029 diff --git a/channeld/channeld.c b/channeld/channeld.c index d74e00211..a2dcc2ea7 100644 --- a/channeld/channeld.c +++ b/channeld/channeld.c @@ -2013,10 +2013,15 @@ static void resend_commitment(struct peer *peer, const struct changed_htlc *last peer->revocations_received); } -/* BOLT #2: +/* BOLT-930a9b44076a8f25a8626b31b3d5a55c0888308c #2: * * A receiving node: - * - if it supports `option_data_loss_protect`, AND the + * - if `option_static_remotekey` applies to the commitment transaction: + * - if `next_revocation_number` is greater than expected above, AND + * `your_last_per_commitment_secret` is correct for that + * `next_revocation_number` minus 1: + *... 
+ * - otherwise, if it supports `option_data_loss_protect`, AND the * `option_data_loss_protect` fields are present: * - if `next_revocation_number` is greater than expected above, * AND `your_last_per_commitment_secret` is correct for that @@ -2025,6 +2030,7 @@ static void resend_commitment(struct peer *peer, const struct changed_htlc *last static void check_future_dataloss_fields(struct peer *peer, u64 next_revocation_number, const struct secret *last_local_per_commit_secret, + /* This is NULL if option_static_remotekey */ const struct pubkey *remote_current_per_commitment_point) { const u8 *msg; @@ -2070,10 +2076,14 @@ static void check_future_dataloss_fields(struct peer *peer, peer_failed(peer->pps, &peer->channel_id, "Awaiting unilateral close"); } -/* BOLT #2: +/* BOLT-930a9b44076a8f25a8626b31b3d5a55c0888308c #2: * * A receiving node: - * - if it supports `option_data_loss_protect`, AND the + * - if `option_static_remotekey` applies to the commitment transaction: + * ... + * - if `your_last_per_commitment_secret` does not match the expected values: + * - SHOULD fail the channel. + * - otherwise, if it supports `option_data_loss_protect`, AND the * `option_data_loss_protect` fields are present: *... * - otherwise (`your_last_per_commitment_secret` or @@ -2084,6 +2094,7 @@ static void check_current_dataloss_fields(struct peer *peer, u64 next_revocation_number, u64 next_commitment_number, const struct secret *last_local_per_commit_secret, + /* NULL if option_static_remotekey */ const struct pubkey *remote_current_per_commitment_point) { struct secret old_commit_secret; @@ -2130,6 +2141,11 @@ static void check_current_dataloss_fields(struct peer *peer, type_to_string(tmpctx, struct secret, &old_commit_secret)); + if (!remote_current_per_commitment_point) { + status_debug("option_static_remotekey: fields are correct"); + return; + } + status_debug("Reestablish, comparing commitments. Remote's next local commitment number" " is %"PRIu64". 
Our next remote is %"PRIu64" with %"PRIu64 " revocations received", @@ -2207,18 +2223,22 @@ static void peer_reconnect(struct peer *peer, struct pubkey my_current_per_commitment_point, remote_current_per_commitment_point; struct secret last_local_per_commitment_secret; - bool dataloss_protect; + bool dataloss_protect, check_extra_fields; const u8 **premature_msgs = tal_arr(peer, const u8 *, 0); dataloss_protect = local_feature_negotiated(peer->localfeatures, LOCAL_DATA_LOSS_PROTECT); + /* Both these options give us extra fields to check. */ + check_extra_fields + = dataloss_protect || peer->channel->option_static_remotekey; + /* Our current per-commitment point is the commitment point in the last * received signed commitment */ get_per_commitment_point(peer->next_index[LOCAL] - 1, &my_current_per_commitment_point, NULL); - /* BOLT #2: + /* BOLT-930a9b44076a8f25a8626b31b3d5a55c0888308c #2: * * - upon reconnection: * - if a channel is in an error state: @@ -2234,7 +2254,7 @@ static void peer_reconnect(struct peer *peer, * of the next `commitment_signed` it expects to receive. * - MUST set `next_revocation_number` to the commitment number * of the next `revoke_and_ack` message it expects to receive. - * - if it supports `option_data_loss_protect`: + * - if it supports `option_data_loss_protect` or `option_static_remotekey`: * - MUST set `my_current_per_commitment_point` to its commitment * point for the last signed commitment it received from its * channel peer (i.e. 
the commitment_point corresponding to the @@ -2246,6 +2266,15 @@ static void peer_reconnect(struct peer *peer, * - MUST set `your_last_per_commitment_secret` to the last * `per_commitment_secret` it received */ +#if EXPERIMENTAL_FEATURES + if (peer->channel->option_static_remotekey) { + msg = towire_channel_reestablish_option_static_remotekey + (NULL, &peer->channel_id, + peer->next_index[LOCAL], + peer->revocations_received, + last_remote_per_commit_secret); + } else +#endif /* EXPERIMENTAL_FEATURES */ if (dataloss_protect) { msg = towire_channel_reestablish_option_data_loss_protect (NULL, &peer->channel_id, @@ -2273,6 +2302,21 @@ static void peer_reconnect(struct peer *peer, } while (handle_peer_gossip_or_error(peer->pps, &peer->channel_id, msg) || capture_premature_msg(&premature_msgs, msg)); +#if EXPERIMENTAL_FEATURES + if (peer->channel->option_static_remotekey) { + if (!fromwire_channel_reestablish_option_static_remotekey(msg, + &channel_id, + &next_commitment_number, + &next_revocation_number, + &last_local_per_commitment_secret)) { + peer_failed(peer->pps, + &peer->channel_id, + "bad reestablish static_remotekey msg: %s %s", + wire_type_name(fromwire_peektype(msg)), + tal_hex(msg, msg)); + } + } else +#endif /* EXPERIMENTAL_FEATURES */ if (dataloss_protect) { if (!fromwire_channel_reestablish_option_data_loss_protect(msg, &channel_id, @@ -2361,9 +2405,10 @@ static void peer_reconnect(struct peer *peer, next_revocation_number, peer->next_index[LOCAL]); } else if (next_revocation_number > peer->next_index[LOCAL] - 1) { - if (!dataloss_protect) - /* They don't support option_data_loss_protect, we - * fail it due to unexpected number */ + if (!check_extra_fields) + /* They don't support option_data_loss_protect or + * option_static_remotekey, we fail it due to + * unexpected number */ peer_failed(peer->pps, &peer->channel_id, "bad reestablish revocation_number: %"PRIu64 @@ -2376,6 +2421,7 @@ static void peer_reconnect(struct peer *peer, 
check_future_dataloss_fields(peer, next_revocation_number, &last_local_per_commitment_secret, + peer->channel->option_static_remotekey ? NULL : &remote_current_per_commitment_point); } else retransmit_revoke_and_ack = false; @@ -2418,12 +2464,14 @@ static void peer_reconnect(struct peer *peer, retransmit_commitment_signed = false; /* After we checked basic sanity, we check dataloss fields if any */ - if (dataloss_protect) + if (check_extra_fields) check_current_dataloss_fields(peer, next_revocation_number, next_commitment_number, &last_local_per_commitment_secret, - &remote_current_per_commitment_point); + peer->channel->option_static_remotekey + ? NULL + : &remote_current_per_commitment_point); /* We have to re-send in the same order we sent originally: * revoke_and_ack (usually) alters our next commitment. */ @@ -2947,6 +2995,10 @@ static void init_channel(struct peer *peer) feerate_per_kw[LOCAL], feerate_per_kw[REMOTE], peer->feerate_min, peer->feerate_max); +#if EXPERIMENTAL_FEATURES + status_debug("option_static_remotekey = %u", option_static_remotekey); +#endif + if(remote_ann_node_sig && remote_ann_bitcoin_sig) { peer->announcement_node_sigs[REMOTE] = *remote_ann_node_sig; peer->announcement_bitcoin_sigs[REMOTE] = *remote_ann_bitcoin_sig; diff --git a/lightningd/channel_control.c b/lightningd/channel_control.c index d9aa93897..6b85a530a 100644 --- a/lightningd/channel_control.c +++ b/lightningd/channel_control.c @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -183,17 +184,29 @@ static void peer_got_shutdown(struct channel *channel, const u8 *msg) static void channel_fail_fallen_behind(struct channel *channel, const u8 *msg) { - struct pubkey per_commitment_point; - - if (!fromwire_channel_fail_fallen_behind(msg, &per_commitment_point)) { + if (!fromwire_channel_fail_fallen_behind(channel, msg, + cast_const2(struct pubkey **, + &channel->future_per_commitment_point))) { channel_internal_error(channel, "bad channel_fail_fallen_behind 
%s", tal_hex(tmpctx, msg)); return; } - channel->future_per_commitment_point - = tal_dup(channel, struct pubkey, &per_commitment_point); + /* per_commitment_point is NULL if option_static_remotekey, but we + * use its presence as a flag so set it to any valid key in that case. */ + if (!channel->future_per_commitment_point) { + struct pubkey *any = tal(channel, struct pubkey); + if (!channel->option_static_remotekey) { + channel_internal_error(channel, + "bad channel_fail_fallen_behind %s", + tal_hex(tmpctx, msg)); + return; + } + if (!pubkey_from_node_id(any, &channel->peer->ld->id)) + fatal("Our own id invalid?"); + channel->future_per_commitment_point = any; + } /* Peer sees this, so send a generic msg about unilateral close. */ channel_fail_permanent(channel, "Awaiting unilateral close"); diff --git a/tests/test_connection.py b/tests/test_connection.py index b7c82ce98..042254e3e 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -1618,7 +1618,12 @@ def test_dataloss_protection(node_factory, bitcoind): orig_db = open(dbpath, "rb").read() l2.start() - # l1 should have sent WIRE_CHANNEL_REESTABLISH with option_data_loss_protect. + if EXPERIMENTAL_FEATURES: + # No my_current_per_commitment_point with option_static_remotekey + my_current_per_commitment_point_regex = "" + else: + my_current_per_commitment_point_regex = "0[23][0-9a-f]{64}" + # l1 should have sent WIRE_CHANNEL_REESTABLISH with extra fields. 
l1.daemon.wait_for_log(r"\[OUT\] 0088" # channel_id "[0-9a-f]{64}" @@ -1630,8 +1635,9 @@ def test_dataloss_protection(node_factory, bitcoind): # trigger a fee-update and commit, hence this may not # be zero) "[0-9a-f]{64}" - # my_current_per_commitment_point - "0[23][0-9a-f]{64}") + # my_current_per_commitment_point (maybe) + + my_current_per_commitment_point_regex + "'$") + # After an htlc, we should get different results (two more commits) l1.pay(l2, 200000000) @@ -1642,7 +1648,7 @@ def test_dataloss_protection(node_factory, bitcoind): l2.restart() - # l1 should have sent WIRE_CHANNEL_REESTABLISH with option_data_loss_protect. + # l1 should have sent WIRE_CHANNEL_REESTABLISH with extra fields. l1.daemon.wait_for_log(r"\[OUT\] 0088" # channel_id "[0-9a-f]{64}" @@ -1652,8 +1658,8 @@ def test_dataloss_protection(node_factory, bitcoind): "000000000000000[1-9]" # your_last_per_commitment_secret "[0-9a-f]{64}" - # my_current_per_commitment_point - "0[23][0-9a-f]{64}") + # my_current_per_commitment_point (maybe) + + my_current_per_commitment_point_regex + "'$") # Now, move l2 back in time. l2.stop()