lightningd: multiple log-file options allow more than one log output.

I've wanted this for a while: the ability to log to multiple places
at once.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Changelog-Changed: lightningd: `log-file` option specified multiple times opens multiple log files.
Rusty Russell 2022-06-26 13:55:01 +09:30
parent 92f10f2c34
commit 56dde2cb77
6 changed files with 116 additions and 65 deletions

View File

@@ -155,8 +155,10 @@ Prefix for log lines: this can be customized if you want to merge logs
with multiple daemons.
**log-file**=*PATH*
-Log to this file instead of stdout. Sending lightningd(8) SIGHUP will
-cause it to reopen this file (useful for log rotation).
+Log to this file (instead of stdout). If you specify this more than once
+you'll get more than one log file: **-** is used to mean stdout. Sending
+lightningd(8) SIGHUP will cause it to reopen each file (useful for log
+rotation).
**log-timestamps**=*BOOL*
Set this to false to turn off timestamp prefixes (they will still appear
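To illustrate the documented behaviour, here is a minimal config sketch (the paths are purely illustrative, not from this commit) with the option given several times; the same option can equally be repeated as --log-file=... on the command line:

    # excerpt from a lightningd config file
    log-file=/var/log/lightningd/lightningd.log
    log-file=/var/log/lightningd/debug.log
    # "-" keeps logging to stdout as well
    log-file=-

Each named file is opened in append mode, and SIGHUP makes lightningd reopen every path-based entry (the "-" entry is skipped), which is what external log rotation relies on.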

View File

@@ -184,7 +184,7 @@ static struct lightningd *new_lightningd(const tal_t *ctx)
* allocation function): ld->log will be implicitly freed when ld
* is. */
ld->log = new_log(ld, ld->log_book, NULL, "lightningd");
-ld->logfile = NULL;
+ld->logfiles = NULL;
/*~ We explicitly set these to NULL: if they're still NULL after option
* parsing, we know they're to be set to the defaults. */

View File

@@ -110,7 +110,7 @@ struct lightningd {
struct log_book *log_book;
/* Log for general stuff. */
struct log *log;
-const char *logfile;
+const char **logfiles;
/* This is us. */
struct node_id id;

View File

@@ -39,7 +39,9 @@ struct log_book {
/* Non-null once it's been initialized */
enum log_level *default_print_level;
struct timeabs init_time;
-FILE *outf;
+/* Array of log files: one per ld->logfiles[] */
+FILE **outfiles;
bool print_timestamps;
struct log_entry *log;
@@ -128,17 +130,18 @@ static const char *level_prefix(enum log_level level)
abort();
}
-static void log_to_file(const char *prefix,
-enum log_level level,
-const struct node_id *node_id,
-const struct timeabs *time,
-const char *str,
-const u8 *io,
-size_t io_len,
-bool print_timestamps,
-FILE *logf)
+static void log_to_files(const char *prefix,
+enum log_level level,
+const struct node_id *node_id,
+const struct timeabs *time,
+const char *str,
+const u8 *io,
+size_t io_len,
+bool print_timestamps,
+FILE **outfiles)
{
char tstamp[sizeof("YYYY-mm-ddTHH:MM:SS.nnnZ ")];
+char *entry;
if (print_timestamps) {
char iso8601_msec_fmt[sizeof("YYYY-mm-ddTHH:MM:SS.%03dZ ")];
@@ -151,25 +154,34 @@ static void log_to_file(const char *prefix,
const char *dir = level == LOG_IO_IN ? "[IN]" : "[OUT]";
char *hex = tal_hexstr(NULL, io, io_len);
if (!node_id)
-fprintf(logf, "%s%s: %s%s %s\n",
-tstamp, prefix, str, dir, hex);
+entry = tal_fmt(tmpctx, "%s%s: %s%s %s\n",
+tstamp, prefix, str, dir, hex);
else
-fprintf(logf, "%s%s-%s: %s%s %s\n",
-tstamp,
-node_id_to_hexstr(tmpctx, node_id),
-prefix, str, dir, hex);
+entry = tal_fmt(tmpctx, "%s%s-%s: %s%s %s\n",
+tstamp,
+node_id_to_hexstr(tmpctx, node_id),
+prefix, str, dir, hex);
tal_free(hex);
} else {
if (!node_id)
-fprintf(logf, "%s%s %s: %s\n",
-tstamp, level_prefix(level), prefix, str);
+entry = tal_fmt(tmpctx, "%s%s %s: %s\n",
+tstamp, level_prefix(level), prefix, str);
else
-fprintf(logf, "%s%s %s-%s: %s\n",
-tstamp, level_prefix(level),
-node_id_to_hexstr(tmpctx, node_id),
-prefix, str);
+entry = tal_fmt(tmpctx, "%s%s %s-%s: %s\n",
+tstamp, level_prefix(level),
+node_id_to_hexstr(tmpctx, node_id),
+prefix, str);
}
+/* Default if nothing set is stdout */
+if (!outfiles) {
+fwrite(entry, strlen(entry), 1, stdout);
+fflush(stdout);
+}
+for (size_t i = 0; i < tal_count(outfiles); i++) {
+fwrite(entry, strlen(entry), 1, outfiles[i]);
+fflush(outfiles[i]);
+}
-fflush(logf);
}
static size_t mem_used(const struct log_entry *e)
@@ -264,7 +276,7 @@ struct log_book *new_log_book(struct lightningd *ld, size_t max_mem)
lr->mem_used = 0;
lr->num_entries = 0;
lr->max_mem = max_mem;
-lr->outf = stdout;
+lr->outfiles = NULL;
lr->default_print_level = NULL;
list_head_init(&lr->print_filters);
lr->init_time = time_now();
@@ -389,12 +401,12 @@ static struct log_entry *new_log_entry(struct log *log, enum log_level level,
static void maybe_print(struct log *log, const struct log_entry *l)
{
if (l->level >= log_print_level(log))
-log_to_file(log->prefix->prefix, l->level,
-l->nc ? &l->nc->node_id : NULL,
-&l->time, l->log,
-l->io, tal_bytelen(l->io),
-log->lr->print_timestamps,
-log->lr->outf);
+log_to_files(log->prefix->prefix, l->level,
+l->nc ? &l->nc->node_id : NULL,
+&l->time, l->log,
+l->io, tal_bytelen(l->io),
+log->lr->print_timestamps,
+log->lr->outfiles);
}
void logv(struct log *log, enum log_level level,
@@ -439,12 +451,12 @@ void log_io(struct log *log, enum log_level dir,
/* Print first, in case we need to truncate. */
if (l->level >= log_print_level(log))
-log_to_file(log->prefix->prefix, l->level,
-l->nc ? &l->nc->node_id : NULL,
-&l->time, str,
-data, len,
-log->lr->print_timestamps,
-log->lr->outf);
+log_to_files(log->prefix->prefix, l->level,
+l->nc ? &l->nc->node_id : NULL,
+&l->time, str,
+data, len,
+log->lr->print_timestamps,
+log->lr->outfiles);
/* Save a tal header, by using raw malloc. */
l->log = strdup(str);
@@ -608,7 +620,7 @@ static void show_log_level(char buf[OPT_SHOW_LEN], const struct log *log)
static char *arg_log_prefix(const char *arg, struct log *log)
{
/* log->lr owns this, since it keeps a pointer to it. */
-tal_free(log->prefix);
+log_prefix_drop(log->prefix);
log->prefix = log_prefix_new(log->lr, arg);
return NULL;
}
@@ -635,11 +647,14 @@ static struct io_plan *setup_read(struct io_conn *conn, struct lightningd *ld);
static struct io_plan *rotate_log(struct io_conn *conn, struct lightningd *ld)
{
log_info(ld->log, "Ending log due to SIGHUP");
-fclose(ld->log->lr->outf);
-ld->log->lr->outf = fopen(ld->logfile, "a");
-if (!ld->log->lr->outf)
-err(1, "failed to reopen log file %s", ld->logfile);
+for (size_t i = 0; i < tal_count(ld->log->lr->outfiles); i++) {
+if (streq(ld->logfiles[i], "-"))
+continue;
+fclose(ld->log->lr->outfiles[i]);
+ld->log->lr->outfiles[i] = fopen(ld->logfiles[i], "a");
+if (!ld->log->lr->outfiles[i])
+err(1, "failed to reopen log file %s", ld->logfiles[i]);
+}
log_info(ld->log, "Started log due to SIGHUP");
return setup_read(conn, ld);
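As a rough sketch of how this rotation path can be exercised from the existing pyln test harness (not part of this commit; l2, logpath1 and wait_for are assumed to be set up as in the test at the bottom of this page, and daemon.proc is assumed to expose the underlying process):

    import os
    import signal

    # Rotate the first log file away, then ask lightningd to reopen its logs.
    os.rename(logpath1, logpath1 + '.old')
    l2.daemon.proc.send_signal(signal.SIGHUP)
    # rotate_log() above reopens every path-based log-file, so a fresh
    # logfile1 should appear; "-" (stdout) entries are left untouched.
    wait_for(lambda: os.path.exists(logpath1))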
@@ -688,22 +703,29 @@ static void setup_log_rotation(struct lightningd *ld)
char *arg_log_to_file(const char *arg, struct lightningd *ld)
{
int size;
+FILE *outf;
-if (ld->logfile) {
-fclose(ld->log->lr->outf);
-ld->logfile = tal_free(ld->logfile);
-} else
+if (!ld->logfiles) {
setup_log_rotation(ld);
+ld->logfiles = tal_arr(ld, const char *, 0);
+ld->log->lr->outfiles = tal_arr(ld->log->lr, FILE *, 0);
+}
-ld->logfile = tal_strdup(ld, arg);
-ld->log->lr->outf = fopen(arg, "a");
-if (!ld->log->lr->outf)
-return tal_fmt(NULL, "Failed to open: %s", strerror(errno));
+if (streq(arg, "-"))
+outf = stdout;
+else {
+outf = fopen(arg, "a");
+if (!outf)
+return tal_fmt(NULL, "Failed to open: %s", strerror(errno));
+}
+tal_arr_expand(&ld->logfiles, tal_strdup(ld->logfiles, arg));
+tal_arr_expand(&ld->log->lr->outfiles, outf);
/* For convenience make a block of empty lines just like Bitcoin Core */
-size = ftell(ld->log->lr->outf);
+size = ftell(outf);
if (size > 0)
-fprintf(ld->log->lr->outf, "\n\n\n\n");
+fprintf(outf, "\n\n\n\n");
log_debug(ld->log, "Opened log file %s", arg);
return NULL;
@@ -721,7 +743,7 @@ void opt_register_logging(struct lightningd *ld)
ld->log,
"log prefix");
opt_register_early_arg("--log-file=<file>", arg_log_to_file, NULL, ld,
-"log to file instead of stdout");
+"Also log to file (- for stdout)");
}
void logging_options_parsed(struct log_book *lr)
@@ -737,12 +759,12 @@ void logging_options_parsed(struct log_book *lr)
const struct log_entry *l = &lr->log[i];
if (l->level >= filter_level(lr, l->prefix))
-log_to_file(l->prefix->prefix, l->level,
-l->nc ? &l->nc->node_id : NULL,
-&l->time, l->log,
-l->io, tal_bytelen(l->io),
-lr->print_timestamps,
-lr->outf);
+log_to_files(l->prefix->prefix, l->level,
+l->nc ? &l->nc->node_id : NULL,
+&l->time, l->log,
+l->io, tal_bytelen(l->io),
+lr->print_timestamps,
+lr->outfiles);
}
}

View File

@@ -880,7 +880,7 @@ static void check_config(struct lightningd *ld)
if (ld->always_use_proxy && !ld->proxyaddr)
fatal("--always-use-proxy needs --proxy");
-if (ld->daemon_parent_fd != -1 && !ld->logfile)
+if (ld->daemon_parent_fd != -1 && !ld->logfiles)
fatal("--daemon needs --log-file");
}
@@ -1438,6 +1438,14 @@ static void json_add_opt_addrs(struct json_stream *response,
}
}
+static void json_add_opt_log_to_files(struct json_stream *response,
+const char *name0,
+const char **logfiles)
+{
+for (size_t i = 0; i < tal_count(logfiles); i++)
+json_add_string(response, name0, logfiles[i]);
+}
struct json_add_opt_alt_subdaemon_args {
const char *name0;
struct json_stream *response;
@@ -1568,7 +1576,8 @@ static void add_config(struct lightningd *ld,
} else if (opt->cb_arg == (void *)opt_set_alias) {
answer = (const char *)ld->alias;
} else if (opt->cb_arg == (void *)arg_log_to_file) {
-answer = ld->logfile;
+if (ld->logfiles)
+json_add_opt_log_to_files(response, name0, ld->logfiles);
} else if (opt->cb_arg == (void *)opt_add_addr) {
json_add_opt_addrs(response, name0,
ld->proposed_wireaddr,

View File

@@ -1475,6 +1475,24 @@ def test_logging(node_factory):
wait_for(lambda: os.path.exists(logpath))
wait_for(check_new_log)
+# Multiple log files
+l2 = node_factory.get_node(options={'log-file': ['logfile1', 'logfile2']}, start=False)
+logpath1 = os.path.join(l2.daemon.lightning_dir, TEST_NETWORK, 'logfile1')
+logpath2 = os.path.join(l2.daemon.lightning_dir, TEST_NETWORK, 'logfile2')
+l2.daemon.start(wait_for_initialized=False)
+wait_for(lambda: os.path.exists(logpath1))
+wait_for(lambda: os.path.exists(logpath2))
+wait_for(lambda: os.path.exists(os.path.join(l2.daemon.lightning_dir, TEST_NETWORK, "lightning-rpc")))
+lines = subprocess.check_output(['cli/lightning-cli',
+'--network={}'.format(TEST_NETWORK),
+'--lightning-dir={}'
+.format(l2.daemon.lightning_dir),
+'-H',
+'listconfigs']).decode('utf-8').splitlines()
+assert 'log-file=logfile1' in lines
+assert 'log-file=logfile2' in lines
@unittest.skipIf(VALGRIND,
"Valgrind sometimes fails assert on injected SEGV")