lightningd: use the async mechanism for channel_update access.

Instead of saving a stripped_update, we use the new
local_fail_in_htlc_needs_update.

One minor change: we return the more correct
towire_temporary_node_failure when the node is still syncing.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This commit is contained in:
Rusty Russell 2020-02-27 14:42:17 +10:30
parent 247d249ea8
commit 40e3566e9a
7 changed files with 64 additions and 49 deletions

View File

@ -265,7 +265,6 @@ struct channel *new_channel(struct peer *peer, u64 dbid,
= tal_steal(channel, remote_upfront_shutdown_script);
channel->option_static_remotekey = option_static_remotekey;
channel->forgets = tal_arr(channel, struct command *, 0);
channel->stripped_update = NULL;
list_add_tail(&peer->channels, &channel->list);
tal_add_destructor(channel, destroy_channel);

View File

@ -123,9 +123,6 @@ struct channel {
/* Any commands trying to forget us. */
struct command **forgets;
/* Lastest channel_update from gossipd, if any: type stripped! */
const u8 *stripped_update;
};
struct channel *new_channel(struct peer *peer, u64 dbid,

View File

@ -513,8 +513,6 @@ static void shutdown_subdaemons(struct lightningd *ld)
/*~ The three "global" daemons, which we shutdown explicitly: we
* give them 10 seconds to exit gracefully before killing them. */
ld->connectd = subd_shutdown(ld->connectd, 10);
ld->gossip = subd_shutdown(ld->gossip, 10);
ld->hsm = subd_shutdown(ld->hsm, 10);
/* Now we free all the HTLCs */
free_htlcs(ld, NULL);
@ -547,6 +545,11 @@ static void shutdown_subdaemons(struct lightningd *ld)
tal_free(p);
}
/*~ Now they're all dead, we can stop gossipd: doing it before HTLCs is
* problematic because local_fail_in_htlc_needs_update() asks gossipd */
ld->gossip = subd_shutdown(ld->gossip, 10);
ld->hsm = subd_shutdown(ld->hsm, 10);
/*~ Commit the transaction. Note that the db is actually
* single-threaded, so commits never fail and we don't need
* spin-and-retry logic everywhere. */

View File

@ -741,11 +741,13 @@ static const u8 *send_onion(const tal_t *ctx, struct lightningd *ld,
{
const u8 *onion;
unsigned int base_expiry;
bool dont_care_about_channel_update;
base_expiry = get_block_height(ld->topology) + 1;
onion = serialize_onionpacket(tmpctx, packet);
return send_htlc_out(ctx, channel, first_hop->amount,
base_expiry + first_hop->delay,
payment_hash, partid, onion, NULL, hout);
payment_hash, partid, onion, NULL, hout,
&dont_care_about_channel_update);
}
/* destination/route_channels/route_nodes are NULL (and path_secrets may be NULL)

View File

@ -280,25 +280,35 @@ const u8 *failmsg_incorrect_or_unknown(const tal_t *ctx,
}
/* localfail are for handing to the local payer if it's local. */
static void fail_out_htlc(struct htlc_out *hout, const char *localfail)
static void fail_out_htlc(struct htlc_out *hout,
const char *localfail,
const u8 *failmsg_needs_update TAKES)
{
htlc_out_check(hout, __func__);
assert(hout->failmsg || hout->failonion);
if (hout->am_origin) {
payment_failed(hout->key.channel->peer->ld, hout, localfail);
if (taken(failmsg_needs_update))
tal_free(failmsg_needs_update);
} else if (hout->in) {
const struct onionreply *failonion;
if (failmsg_needs_update) {
local_fail_in_htlc_needs_update(hout->in,
failmsg_needs_update,
hout->key.channel->scid);
} else {
const struct onionreply *failonion;
/* If we have an onion, simply copy it. */
if (hout->failonion)
failonion = hout->failonion;
/* Otherwise, we need to onionize this local error. */
else
failonion = create_onionreply(hout,
hout->in->shared_secret,
hout->failmsg);
fail_in_htlc(hout->in, failonion);
/* If we have an onion, simply copy it. */
if (hout->failonion)
failonion = hout->failonion;
/* Otherwise, we need to onionize this local error. */
else
failonion = create_onionreply(hout,
hout->in->shared_secret,
hout->failmsg);
fail_in_htlc(hout->in, failonion);
}
}
}
@ -495,15 +505,16 @@ static void destroy_hout_subd_died(struct htlc_out *hout)
"Failing HTLC %"PRIu64" due to peer death",
hout->key.id);
hout->failmsg = towire_temporary_channel_failure(hout,
hout->key.channel->stripped_update);
/* This isn't really used, except as sanity check */
hout->failmsg = towire_temporary_node_failure(hout);
/* Assign a temporary state (we're about to free it!) so checks
* are happy that it has a failure message */
assert(hout->hstate == SENT_ADD_HTLC);
hout->hstate = RCVD_REMOVE_HTLC;
fail_out_htlc(hout, "Outgoing subdaemon died");
fail_out_htlc(hout, "Outgoing subdaemon died",
take(towire_temporary_channel_failure(NULL, NULL)));
}
/* This is where channeld gives us the HTLC id, and also reports if it
@ -595,11 +606,13 @@ const u8 *send_htlc_out(const tal_t *ctx,
u64 partid,
const u8 *onion_routing_packet,
struct htlc_in *in,
struct htlc_out **houtp)
struct htlc_out **houtp,
bool *needs_update_appended)
{
u8 *msg;
*houtp = NULL;
*needs_update_appended = false;
if (!channel_can_add_htlc(out)) {
log_info(out->log, "Attempt to send HTLC but not ready (%s)",
@ -610,15 +623,14 @@ const u8 *send_htlc_out(const tal_t *ctx,
if (!out->owner) {
log_info(out->log, "Attempt to send HTLC but unowned (%s)",
channel_state_name(out));
return towire_temporary_channel_failure(ctx,
out->stripped_update);
*needs_update_appended = true;
return towire_temporary_channel_failure(ctx, NULL);
}
if (!topology_synced(out->peer->ld->topology)) {
log_info(out->log, "Attempt to send HTLC but still syncing"
" with bitcoin network");
return towire_temporary_channel_failure(ctx,
out->stripped_update);
return towire_temporary_node_failure(ctx);
}
/* Make peer's daemon own it, catch if it dies. */
@ -646,7 +658,6 @@ static void forward_htlc(struct htlc_in *hin,
struct amount_msat amt_to_forward,
u32 outgoing_cltv_value,
const struct node_id *next_hop,
const u8 *stripped_update TAKES,
const u8 next_onion[TOTAL_PACKET_SIZE])
{
const u8 *failmsg;
@ -654,6 +665,7 @@ static void forward_htlc(struct htlc_in *hin,
struct lightningd *ld = hin->key.channel->peer->ld;
struct channel *next = active_channel_by_id(ld, next_hop, NULL);
struct htlc_out *hout = NULL;
bool needs_update_appended;
/* Unknown peer, or peer not ready. */
if (!next || !next->scid) {
@ -662,17 +674,9 @@ static void forward_htlc(struct htlc_in *hin,
hin, next ? next->scid : NULL, NULL,
FORWARD_LOCAL_FAILED,
WIRE_UNKNOWN_NEXT_PEER);
if (taken(stripped_update))
tal_free(stripped_update);
return;
}
/* OK, apply any channel_update gossipd gave us for this channel. */
tal_free(next->stripped_update);
next->stripped_update
= tal_dup_arr(next, u8, stripped_update,
tal_count(stripped_update), 0);
/* BOLT #7:
*
* The origin node:
@ -685,26 +689,28 @@ static void forward_htlc(struct htlc_in *hin,
log_broken(ld->log, "Fee overflow forwarding %s!",
type_to_string(tmpctx, struct amount_msat,
&amt_to_forward));
failmsg = towire_fee_insufficient(tmpctx, hin->msat,
next->stripped_update);
needs_update_appended = true;
failmsg = towire_fee_insufficient(tmpctx, hin->msat, NULL);
goto fail;
}
if (!check_fwd_amount(hin, amt_to_forward, hin->msat, fee)) {
failmsg = towire_fee_insufficient(tmpctx, hin->msat,
next->stripped_update);
needs_update_appended = true;
failmsg = towire_fee_insufficient(tmpctx, hin->msat, NULL);
goto fail;
}
if (!check_cltv(hin, cltv_expiry, outgoing_cltv_value,
ld->config.cltv_expiry_delta)) {
needs_update_appended = true;
failmsg = towire_incorrect_cltv_expiry(tmpctx, cltv_expiry,
next->stripped_update);
NULL);
goto fail;
}
if (amount_msat_greater(amt_to_forward,
chainparams->max_payment)) {
/* ENOWUMBO! */
needs_update_appended = false;
failmsg = towire_required_channel_feature_missing(tmpctx);
goto fail;
}
@ -723,8 +729,8 @@ static void forward_htlc(struct htlc_in *hin,
"Expiry cltv %u too close to current %u",
outgoing_cltv_value,
get_block_height(ld->topology));
failmsg = towire_expiry_too_soon(tmpctx,
next->stripped_update);
needs_update_appended = true;
failmsg = towire_expiry_too_soon(tmpctx, NULL);
goto fail;
}
@ -740,18 +746,23 @@ static void forward_htlc(struct htlc_in *hin,
outgoing_cltv_value,
get_block_height(ld->topology),
ld->config.locktime_max);
needs_update_appended = false;
failmsg = towire_expiry_too_far(tmpctx);
goto fail;
}
failmsg = send_htlc_out(tmpctx, next, amt_to_forward,
outgoing_cltv_value, &hin->payment_hash,
0, next_onion, hin, &hout);
0, next_onion, hin,
&hout, &needs_update_appended);
if (!failmsg)
return;
fail:
local_fail_in_htlc(hin, failmsg);
if (needs_update_appended)
local_fail_in_htlc_needs_update(hin, failmsg, next->scid);
else
local_fail_in_htlc(hin, failmsg);
wallet_forwarded_payment_add(ld->wallet,
hin, next->scid, hout,
FORWARD_LOCAL_FAILED,
@ -798,7 +809,6 @@ static void channel_resolve_reply(struct subd *gossip, const u8 *msg,
forward_htlc(gr->hin, gr->hin->cltv_expiry,
gr->amt_to_forward, gr->outgoing_cltv_value, peer_id,
take(stripped_update),
gr->next_onion);
tal_free(gr);
}
@ -1421,7 +1431,7 @@ static void remove_htlc_out(struct channel *channel, struct htlc_out *hout)
/* If it's failed, now we can forward since it's completely locked-in */
if (!hout->preimage) {
fail_out_htlc(hout, NULL);
fail_out_htlc(hout, NULL, NULL);
} else {
struct amount_msat oldamt = channel->our_msat;
/* We paid for this HTLC, so deduct balance. */

View File

@ -47,7 +47,7 @@ void peer_got_revoke(struct channel *channel, const u8 *msg);
void update_per_commit_point(struct channel *channel,
const struct pubkey *per_commitment_point);
/* Returns NULL on success, otherwise failmsg */
/* Returns NULL on success, otherwise failmsg (and sets *needs_update_appended)*/
const u8 *send_htlc_out(const tal_t *ctx,
struct channel *out,
struct amount_msat amount, u32 cltv,
@ -55,7 +55,8 @@ const u8 *send_htlc_out(const tal_t *ctx,
u64 partid,
const u8 *onion_routing_packet,
struct htlc_in *in,
struct htlc_out **houtp);
struct htlc_out **houtp,
bool *needs_update_appended);
void onchain_failed_our_htlc(const struct channel *channel,
const struct htlc_stub *htlc,

View File

@ -184,8 +184,11 @@ def test_lightningd_still_loading(node_factory, bitcoind, executor):
assert 'warning_bitcoind_sync' not in l1.rpc.getinfo()
assert 'warning_lightningd_sync' in l1.rpc.getinfo()
# Make sure it's connected to l2 (otherwise we get TEMPORARY_CHANNEL_FAILURE)
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
# Payments will fail. FIXME: More informative msg?
with pytest.raises(RpcError, match=r'TEMPORARY_CHANNEL_FAILURE'):
with pytest.raises(RpcError, match=r'TEMPORARY_NODE_FAILURE'):
l1.pay(l2, 1000)
# Can't fund a new channel.