/* rgb-cln/lightningd/connect_control.c */

#include "config.h"
#include <ccan/err/err.h>
#include <ccan/tal/str/str.h>
#include <common/configdir.h>
#include <common/json_command.h>
#include <common/json_helpers.h>
#include <common/json_tok.h>
#include <common/memleak.h>
#include <common/param.h>
#include <common/timeout.h>
#include <common/type_to_string.h>
#include <connectd/connectd_wiregen.h>
#include <gossipd/gossipd_wiregen.h>
#include <hsmd/capabilities.h>
#include <lightningd/channel.h>
#include <lightningd/connect_control.h>
#include <lightningd/dual_open_control.h>
#include <lightningd/hsm_control.h>
#include <lightningd/jsonrpc.h>
#include <lightningd/lightningd.h>
#include <lightningd/onion_message.h>
#include <lightningd/opening_common.h>
#include <lightningd/opening_control.h>
#include <lightningd/peer_control.h>
#include <lightningd/plugin_hook.h>
struct connect {
struct list_node list;
struct node_id id;
struct command *cmd;
};
static void destroy_connect(struct connect *c)
{
list_del(&c->list);
}
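
/* Track an in-flight `connect` command so it can be resolved when
 * connectd later reports success or failure for this node id. */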
static struct connect *new_connect(struct lightningd *ld,
const struct node_id *id,
struct command *cmd)
{
struct connect *c = tal(cmd, struct connect);
c->id = *id;
c->cmd = cmd;
list_add_tail(&ld->connects, &c->list);
tal_add_destructor(c, destroy_connect);
return c;
}
/* Finds first command which matches. */
static struct connect *find_connect(struct lightningd *ld,
const struct node_id *id)
{
struct connect *i;
list_for_each(&ld->connects, i, list) {
if (node_id_eq(&i->id, id))
return i;
}
return NULL;
}
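
/* Resolve a pending `connect` command, reporting the peer's id, features,
 * connection direction and address. */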
static struct command_result *connect_cmd_succeed(struct command *cmd,
const struct peer *peer,
bool incoming,
const struct wireaddr_internal *addr)
{
struct json_stream *response = json_stream_success(cmd);
json_add_node_id(response, "id", &peer->id);
json_add_hex_talarr(response, "features", peer->their_features);
json_add_string(response, "direction", incoming ? "in" : "out");
json_add_address_internal(response, "address", addr);
return command_success(cmd, response);
}
/* FIXME: Reorder! */
static void try_connect(const tal_t *ctx,
struct lightningd *ld,
const struct node_id *id,
u32 seconds_delay,
const struct wireaddr_internal *addrhint);
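
/* JSON-RPC `connect` command: accepts an id, id@host or id@host:port
 * (or separate host/port parameters), then asks connectd to reach the peer. */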
static struct command_result *json_connect(struct command *cmd,
const char *buffer,
const jsmntok_t *obj UNNEEDED,
const jsmntok_t *params)
{
u32 *port;
jsmntok_t *idtok;
struct node_id id;
char *id_str;
char *atptr;
char *ataddr = NULL;
const char *name;
struct wireaddr_internal *addr;
const char *err_msg;
if (!param(cmd, buffer, params,
p_req("id", param_tok, (const jsmntok_t **) &idtok),
p_opt("host", param_string, &name),
p_opt("port", param_number, &port),
NULL))
return command_param_failed();
/* Check for id@addrport form */
id_str = json_strdup(cmd, buffer, idtok);
atptr = strchr(id_str, '@');
if (atptr) {
int atidx = atptr - id_str;
ataddr = tal_strdup(cmd, atptr + 1);
/* Cut id. */
idtok->end = idtok->start + atidx;
}
if (!json_to_node_id(buffer, idtok, &id)) {
return command_fail(cmd, JSONRPC2_INVALID_PARAMS,
"id %.*s not valid",
json_tok_full_len(idtok),
json_tok_full(buffer, idtok));
}
if (name && ataddr) {
return command_fail(cmd, JSONRPC2_INVALID_PARAMS,
"Can't specify host as both xxx@yyy "
"and separate argument");
}
/* Get parseable host if provided somehow */
if (!name && ataddr)
name = ataddr;
/* Port without host name? */
if (port && !name) {
return command_fail(cmd, JSONRPC2_INVALID_PARAMS,
"Can't specify port without host");
}
/* Was there a parseable host name? */
if (name) {
/* Is there a port? */
if (!port) {
port = tal(cmd, u32);
*port = DEFAULT_PORT;
}
addr = tal(cmd, struct wireaddr_internal);
if (!parse_wireaddr_internal(name, addr, *port, false,
!cmd->ld->always_use_proxy
&& !cmd->ld->pure_tor_setup,
true, deprecated_apis,
&err_msg)) {
return command_fail(cmd, LIGHTNINGD,
"Host %s:%u not valid: %s",
name, *port,
err_msg ? err_msg : "port is 0");
}
} else
addr = NULL;
try_connect(cmd, cmd->ld, &id, 0, addr);
/* Leave this here for peer_connected or connect_failed. */
new_connect(cmd->ld, &id, cmd);
return command_still_pending(cmd);
}
static const struct json_command connect_command = {
"connect",
"network",
json_connect,
"Connect to {id} at {host} (which can end in ':port' if not default). "
"{id} can also be of the form id@host"
};
AUTODATA(json_command, &connect_command);
/* We actually use this even if we don't need a delay, while we talk to
* gossipd to get the addresses. */
struct delayed_reconnect {
struct lightningd *ld;
struct node_id id;
u32 seconds_delayed;
struct wireaddr_internal *addrhint;
};
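
/* gossipd has replied with the node's known addresses: forward them,
 * together with any address hint, to connectd for the actual connection
 * attempt. */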
static void gossipd_got_addrs(struct subd *subd,
const u8 *msg,
const int *fds,
struct delayed_reconnect *d)
{
struct wireaddr *addrs;
u8 *connectmsg;
if (!fromwire_gossipd_get_addrs_reply(tmpctx, msg, &addrs))
fatal("Gossipd gave bad GOSSIPD_GET_ADDRS_REPLY %s",
tal_hex(msg, msg));
connectmsg = towire_connectd_connect_to_peer(NULL,
&d->id,
d->seconds_delayed,
addrs,
d->addrhint);
subd_send_msg(d->ld->connectd, take(connectmsg));
tal_free(d);
}
/* We might be off a delay timer. Now ask gossipd about public addresses. */
static void do_connect(struct delayed_reconnect *d)
{
u8 *msg = towire_gossipd_get_addrs(NULL, &d->id);
subd_req(d, d->ld->gossip, take(msg), -1, 0, gossipd_got_addrs, d);
}
/* peer may be NULL here */
static void try_connect(const tal_t *ctx,
struct lightningd *ld,
const struct node_id *id,
u32 seconds_delay,
const struct wireaddr_internal *addrhint)
{
struct delayed_reconnect *d;
struct peer *peer;
d = tal(ctx, struct delayed_reconnect);
d->ld = ld;
d->id = *id;
d->seconds_delayed = seconds_delay;
d->addrhint = tal_dup_or_null(d, struct wireaddr_internal, addrhint);
if (!seconds_delay) {
do_connect(d);
return;
}
log_peer_debug(ld->log, id, "Will try reconnect in %u seconds",
seconds_delay);
/* Update any channel billboards */
peer = peer_by_id(ld, id);
if (peer) {
struct channel *channel;
list_for_each(&peer->channels, channel, list) {
if (!channel_active(channel))
continue;
channel_set_billboard(channel, false,
tal_fmt(tmpctx,
"Will attempt reconnect "
"in %u seconds",
seconds_delay));
}
}
/* We fuzz the timer by up to 1 second, to avoid getting into
 * simultaneous-reconnect deadlocks with the peer. */
notleak(new_reltimer(ld->timers, d,
timerel_add(time_from_sec(seconds_delay),
time_from_usec(pseudorand(1000000))),
do_connect, d));
}
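
/* Reconnect to a known peer after a delay, unless reconnection has been
 * disabled. */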
void try_reconnect(const tal_t *ctx,
struct peer *peer,
u32 seconds_delay,
const struct wireaddr_internal *addrhint)
{
if (!peer->ld->reconnect)
return;
try_connect(ctx,
peer->ld,
&peer->id,
seconds_delay,
addrhint);
}
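
/* connectd reported a failed connection attempt: fail any pending
 * `connect` commands and, if the peer has an active channel, schedule a
 * delayed reconnect. */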
static void connect_failed(struct lightningd *ld, const u8 *msg)
{
struct node_id id;
errcode_t errcode;
char *errmsg;
struct connect *c;
u32 seconds_to_delay;
struct wireaddr_internal *addrhint;
struct peer *peer;
if (!fromwire_connectd_connect_failed(tmpctx, msg, &id, &errcode, &errmsg,
&seconds_to_delay, &addrhint))
fatal("Connect gave bad CONNECTD_CONNECT_FAILED message %s",
tal_hex(msg, msg));
/* We can have multiple connect commands: fail them all */
while ((c = find_connect(ld, &id)) != NULL) {
/* They delete themselves from list */
was_pending(command_fail(c->cmd, errcode, "%s", errmsg));
}
/* If we have an active channel, then reconnect. */
peer = peer_by_id(ld, &id);
if (peer) {
struct channel *channel = peer_active_channel(peer);
if (channel)
try_reconnect(peer, peer, seconds_to_delay, addrhint);
}
}
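
/* A connection to this peer is fully established: resolve any pending
 * `connect` commands. */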
void connect_succeeded(struct lightningd *ld, const struct peer *peer,
bool incoming,
const struct wireaddr_internal *addr)
{
struct connect *c;
/* We can have multiple connect commands: succeed them all */
while ((c = find_connect(ld, &peer->id)) != NULL) {
/* They delete themselves from list */
connect_cmd_succeed(c->cmd, peer, incoming, addr);
}
}
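
/* connectd says we were already connected: resolve any pending `connect`
 * commands using the existing connection's details. */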
static void peer_already_connected(struct lightningd *ld, const u8 *msg)
{
struct node_id id;
struct peer *peer;
if (!fromwire_connectd_peer_already_connected(msg, &id))
fatal("Bad msg %s from connectd", tal_hex(tmpctx, msg));
peer = peer_by_id(ld, &id);
if (peer)
connect_succeeded(ld, peer,
peer->connected_incoming,
&peer->addr);
}
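
/* The peer has reconnected: clean up whatever is still using the old
 * connection (uncommitted, active or unsaved channels) so the new
 * connection can take over. */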
static void peer_please_disconnect(struct lightningd *ld, const u8 *msg)
{
struct node_id id;
struct channel *c;
struct uncommitted_channel *uc;
if (!fromwire_connectd_reconnected(msg, &id))
fatal("Bad msg %s from connectd", tal_hex(tmpctx, msg));
c = active_channel_by_id(ld, &id, &uc);
if (uc)
kill_uncommitted_channel(uc, "Reconnected");
else if (c) {
channel_cleanup_commands(c, "Reconnected");
channel_fail_reconnect(c, "Reconnected");
}
else if ((c = unsaved_channel_by_id(ld, &id))) {
log_info(c->log, "Killing opening daemon: Reconnected");
channel_unsaved_close_conn(c, "Reconnected");
}
}
struct custommsg_payload {
struct node_id peer_id;
u8 *msg;
};
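
/* custommsg hook callback: plugins must answer with result "continue";
 * any other reply is fatal.  Returning true moves on to the next plugin. */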
static bool custommsg_cb(struct custommsg_payload *payload,
const char *buffer, const jsmntok_t *toks)
{
const jsmntok_t *t_res;
if (!toks || !buffer)
return true;
t_res = json_get_member(buffer, toks, "result");
/* Anything other than "continue" is fatal. */
if (!t_res || !json_tok_streq(buffer, t_res, "continue"))
fatal("Plugin returned an invalid response to the "
"custommsg hook: %s", buffer);
/* call next hook */
return true;
}
static void custommsg_final(struct custommsg_payload *payload STEALS)
{
tal_steal(tmpctx, payload);
}
static void custommsg_payload_serialize(struct custommsg_payload *payload,
struct json_stream *stream,
struct plugin *plugin)
{
json_add_hex_talarr(stream, "payload", payload->msg);
json_add_node_id(stream, "peer_id", &payload->peer_id);
}
REGISTER_PLUGIN_HOOK(custommsg,
custommsg_cb,
custommsg_final,
custommsg_payload_serialize,
struct custommsg_payload *);
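
/* A custom (unknown-type) message arrived from a peer: hand it to the
 * custommsg plugin hook. */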
static void handle_custommsg_in(struct lightningd *ld, const u8 *msg)
{
struct custommsg_payload *p = tal(NULL, struct custommsg_payload);
if (!fromwire_connectd_custommsg_in(p, msg, &p->peer_id, &p->msg)) {
log_broken(ld->log, "Malformed custommsg: %s",
tal_hex(tmpctx, msg));
tal_free(p);
return;
}
plugin_hook_call_custommsg(ld, p);
}
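
/* Dispatch unsolicited messages arriving from the connectd daemon. */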
static unsigned connectd_msg(struct subd *connectd, const u8 *msg, const int *fds)
{
enum connectd_wire t = fromwire_peektype(msg);
switch (t) {
/* These are messages we send, not ones we receive. */
case WIRE_CONNECTD_INIT:
case WIRE_CONNECTD_ACTIVATE:
case WIRE_CONNECTD_CONNECT_TO_PEER:
case WIRE_CONNECTD_DISCARD_PEER:
case WIRE_CONNECTD_DEV_MEMLEAK:
case WIRE_CONNECTD_PEER_FINAL_MSG:
case WIRE_CONNECTD_PEER_MAKE_ACTIVE:
case WIRE_CONNECTD_PING:
case WIRE_CONNECTD_SEND_ONIONMSG:
case WIRE_CONNECTD_CUSTOMMSG_OUT:
/* These are replies, so they never get through to here. */
case WIRE_CONNECTD_INIT_REPLY:
case WIRE_CONNECTD_ACTIVATE_REPLY:
case WIRE_CONNECTD_DEV_MEMLEAK_REPLY:
case WIRE_CONNECTD_PING_REPLY:
break;
case WIRE_CONNECTD_RECONNECTED:
peer_please_disconnect(connectd->ld, msg);
break;
case WIRE_CONNECTD_PEER_CONNECTED:
peer_connected(connectd->ld, msg);
break;
case WIRE_CONNECTD_PEER_ACTIVE:
if (tal_count(fds) != 1)
return 1;
peer_active(connectd->ld, msg, fds[0]);
break;
case WIRE_CONNECTD_PEER_DISCONNECT_DONE:
peer_disconnect_done(connectd->ld, msg);
break;
case WIRE_CONNECTD_PEER_ALREADY_CONNECTED:
peer_already_connected(connectd->ld, msg);
break;
case WIRE_CONNECTD_CONNECT_FAILED:
connect_failed(connectd->ld, msg);
break;
case WIRE_CONNECTD_GOT_ONIONMSG_TO_US:
handle_onionmsg_to_us(connectd->ld, msg);
break;
case WIRE_CONNECTD_CUSTOMMSG_IN:
handle_custommsg_in(connectd->ld, msg);
break;
}
return 0;
}
static void connect_init_done(struct subd *connectd,
const u8 *reply,
const int *fds UNUSED,
void *unused UNUSED)
{
struct lightningd *ld = connectd->ld;
char *errmsg;
log_debug(connectd->log, "connectd_init_done");
if (!fromwire_connectd_init_reply(ld, reply,
&ld->binding,
&ld->announceable,
&errmsg))
fatal("Bad connectd_init_reply: %s",
tal_hex(reply, reply));
/* connectd can fail in *informative* ways: don't use fatal() here and
* confuse things with a backtrace! */
if (errmsg) {
log_broken(connectd->log, "%s", errmsg);
exit(1);
}
/* Break out of loop, so we can begin */
io_break(connectd);
}
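
/* Launch connectd, handing it an HSM fd and one end of a socketpair shared
 * with gossipd, then block until its init reply arrives.  Returns the
 * gossipd end of the socketpair. */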
int connectd_init(struct lightningd *ld)
{
int fds[2];
u8 *msg;
int hsmfd;
struct wireaddr_internal *wireaddrs = ld->proposed_wireaddr;
enum addr_listen_announce *listen_announce = ld->proposed_listen_announce;
const char *websocket_helper_path;
websocket_helper_path = subdaemon_path(tmpctx, ld,
"lightning_websocketd");
if (socketpair(AF_LOCAL, SOCK_STREAM, 0, fds) != 0)
fatal("Could not socketpair for connectd<->gossipd");
hsmfd = hsm_get_global_fd(ld, HSM_CAP_ECDH);
ld->connectd = new_global_subd(ld, "lightning_connectd",
connectd_wire_name, connectd_msg,
take(&hsmfd), take(&fds[1]),
#if DEVELOPER
/* Not take(): we share it */
ld->dev_disconnect_fd >= 0 ?
&ld->dev_disconnect_fd : NULL,
#endif
NULL);
if (!ld->connectd)
err(1, "Could not subdaemon connectd");
/* If no addr specified, hand wildcard to connectd */
if (tal_count(wireaddrs) == 0 && ld->autolisten) {
wireaddrs = tal_arrz(tmpctx, struct wireaddr_internal, 1);
listen_announce = tal_arr(tmpctx, enum addr_listen_announce, 1);
wireaddrs->itype = ADDR_INTERNAL_ALLPROTO;
wireaddrs->u.port = ld->portnum;
*listen_announce = ADDR_LISTEN_AND_ANNOUNCE;
}
msg = towire_connectd_init(
tmpctx, chainparams,
ld->our_features,
&ld->id,
wireaddrs,
listen_announce,
ld->proxyaddr, ld->always_use_proxy || ld->pure_tor_setup,
IFDEV(ld->dev_allow_localhost, false), ld->config.use_dns,
ld->tor_service_password ? ld->tor_service_password : "",
ld->config.use_v3_autotor,
ld->config.connection_timeout_secs,
websocket_helper_path,
ld->websocket_port,
IFDEV(ld->dev_fast_gossip, false),
IFDEV(ld->dev_disconnect_fd >= 0, false));
subd_req(ld->connectd, ld->connectd, take(msg), -1, 0,
connect_init_done, NULL);
/* Wait for init_reply */
io_loop(NULL, NULL);
return fds[0];
}
static void connect_activate_done(struct subd *connectd,
const u8 *reply,
const int *fds UNUSED,
void *unused UNUSED)
{
char *errmsg;
if (!fromwire_connectd_activate_reply(reply, reply, &errmsg))
fatal("Bad connectd_activate_reply: %s",
tal_hex(reply, reply));
/* connectd can fail in *informative* ways: don't use fatal() here and
* confuse things with a backtrace! */
if (errmsg) {
log_broken(connectd->log, "%s", errmsg);
exit(1);
}
/* Break out of loop, so we can begin */
io_break(connectd);
}
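
/* Tell connectd to start listening for incoming connections (if we are
 * configured to listen), and block until it acknowledges. */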
void connectd_activate(struct lightningd *ld)
{
const u8 *msg = towire_connectd_activate(NULL, ld->listen);
subd_req(ld->connectd, ld->connectd, take(msg), -1, 0,
connect_activate_done, NULL);
/* Wait for activate_reply */
io_loop(NULL, NULL);
}
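
/* JSON-RPC `sendcustommsg` command: send an odd-typed, non-protocol
 * message to a currently-connected peer via connectd. */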
static struct command_result *json_sendcustommsg(struct command *cmd,
const char *buffer,
const jsmntok_t *obj UNNEEDED,
const jsmntok_t *params)
{
struct json_stream *response;
struct node_id *dest;
struct peer *peer;
u8 *msg;
int type;
if (!param(cmd, buffer, params,
p_req("node_id", param_node_id, &dest),
p_req("msg", param_bin_from_hex, &msg),
NULL))
return command_param_failed();
type = fromwire_peektype(msg);
if (peer_wire_is_defined(type)) {
return command_fail(
cmd, JSONRPC2_INVALID_REQUEST,
"Cannot send messages of type %d (%s). It is not possible "
"to send messages that have a type managed internally "
"since that might cause issues with the internal state "
"tracking.",
type, peer_wire_name(type));
}
if (type % 2 == 0) {
return command_fail(
cmd, JSONRPC2_INVALID_REQUEST,
"Cannot send even-typed %d custom message. Currently "
"custom messages are limited to odd-numbered message "
"types, as even-numbered types might result in "
"disconnections.",
type);
}
peer = peer_by_id(cmd->ld, dest);
if (!peer) {
return command_fail(cmd, JSONRPC2_INVALID_REQUEST,
"No such peer: %s",
type_to_string(cmd, struct node_id, dest));
}
if (!peer->is_connected) {
return command_fail(cmd, JSONRPC2_INVALID_REQUEST,
"Peer is not connected: %s",
type_to_string(cmd, struct node_id, dest));
}
subd_send_msg(cmd->ld->connectd,
take(towire_connectd_custommsg_out(cmd, dest, msg)));
response = json_stream_success(cmd);
json_add_string(response, "status",
"Message sent to connectd for delivery");
return command_success(cmd, response);
}
static const struct json_command sendcustommsg_command = {
"sendcustommsg",
"utility",
json_sendcustommsg,
"Send a custom message to the peer with the given {node_id}",
.verbose = "sendcustommsg node_id hexcustommsg",
};
AUTODATA(json_command, &sendcustommsg_command);
#ifdef COMPAT_V0100
#ifdef DEVELOPER
static const struct json_command dev_sendcustommsg_command = {
"dev-sendcustommsg",
"utility",
json_sendcustommsg,
"Send a custom message to the peer with the given {node_id}",
.verbose = "dev-sendcustommsg node_id hexcustommsg",
};
AUTODATA(json_command, &dev_sendcustommsg_command);
#endif /* DEVELOPER */
#endif /* COMPAT_V0100 */