Log level filter work

This commit is contained in:
Mike Dilger 2024-02-21 14:28:13 +13:00
parent 343e352f44
commit 5ab2098244
9 changed files with 289 additions and 66 deletions

View File

@ -7,12 +7,6 @@ use std::fs::OpenOptions;
use std::io::Read;
fn main() -> Result<(), Error> {
env_logger::builder()
.format_target(false)
.format_module_path(false)
.format_timestamp_millis()
.init();
// Get args (config path)
let mut args = env::args();
if args.len() <= 1 {
@ -27,7 +21,17 @@ fn main() -> Result<(), Error> {
file.read_to_string(&mut contents)?;
let friendly_config: FriendlyConfig = toml::from_str(&contents)?;
let mut config: Config = friendly_config.into_config()?;
log::debug!("Loaded config file.");
env_logger::Builder::new()
.filter_level(config.library_log_level)
.filter(Some("Server"), config.server_log_level)
.filter(Some("Client"), config.client_log_level)
.format_target(true)
.format_module_path(false)
.format_timestamp_millis()
.init();
log::debug!(target: "Server", "Loaded config file.");
// Force allow of scraping (this program is a scraper)
config.allow_scraping = true;

View File

@ -36,12 +36,6 @@ use tungstenite::Message;
#[tokio::main]
async fn main() -> Result<(), Error> {
env_logger::builder()
.format_target(false)
.format_module_path(false)
.format_timestamp_millis()
.init();
// Get args (config path)
let mut args = env::args();
if args.len() <= 1 {
@ -56,7 +50,17 @@ async fn main() -> Result<(), Error> {
file.read_to_string(&mut contents)?;
let friendly_config: FriendlyConfig = toml::from_str(&contents)?;
let config: Config = friendly_config.into_config()?;
log::debug!("Loaded config file.");
env_logger::Builder::new()
.filter_level(config.library_log_level)
.filter(Some("Server"), config.server_log_level)
.filter(Some("Client"), config.client_log_level)
.format_target(true)
.format_module_path(false)
.format_timestamp_millis()
.init();
log::debug!(target: "Server", "Loaded config file.");
// Setup store
let store = Store::new(&config.data_directory, config.allow_scraping)?;
@ -64,16 +68,16 @@ async fn main() -> Result<(), Error> {
// TLS setup
let maybe_tls_acceptor = if config.use_tls {
log::info!("Using TLS");
log::info!(target: "Server", "Using TLS");
Some(tls::tls_acceptor(&config)?)
} else {
log::info!("Not using TLS");
log::info!(target: "Server", "Not using TLS");
None
};
// Bind listener to port
let listener = TcpListener::bind((&*config.ip_address, config.port)).await?;
log::info!("Running on {}:{}", config.ip_address, config.port);
log::info!(target: "Server", "Running on {}:{}", config.ip_address, config.port);
// Store config into GLOBALS
let _ = GLOBALS.config.set(config);
@ -86,15 +90,15 @@ async fn main() -> Result<(), Error> {
tokio::select! {
// Exits gracefully upon exit-type signals
v = interrupt_signal.recv() => if v.is_some() {
log::info!("SIGINT");
log::info!(target: "Server", "SIGINT");
break;
},
v = quit_signal.recv() => if v.is_some() {
log::info!("SIGQUIT");
log::info!(target: "Server", "SIGQUIT");
break;
},
v = terminate_signal.recv() => if v.is_some() {
log::info!("SIGTERM");
log::info!(target: "Server", "SIGTERM");
break;
},
@ -106,7 +110,8 @@ async fn main() -> Result<(), Error> {
if let Some(ip_data) = GLOBALS.ip_data.get(&ipaddr) {
let now = Time::now();
if ip_data.ban_until > now {
log::debug!("{peer_addr}: Blocking reconnection until {}",
log::debug!(target: "Client",
"{peer_addr}: Blocking reconnection until {}",
ip_data.ban_until);
continue;
}
@ -116,10 +121,16 @@ async fn main() -> Result<(), Error> {
let tls_acceptor_clone = tls_acceptor.clone();
tokio::spawn(async move {
match tls_acceptor_clone.accept(tcp_stream).await {
Err(e) => log::error!("{}: {}", peer_addr, e),
Err(e) => log::error!(
target: "Client",
"{}: {}", peer_addr, e
),
Ok(tls_stream) => {
if let Err(e) = serve(MaybeTlsStream::Rustls(tls_stream), peer_addr).await {
log::error!("{}: {}", peer_addr, e);
log::error!(
target: "Client",
"{}: {}", peer_addr, e
);
}
}
}
@ -140,7 +151,7 @@ async fn main() -> Result<(), Error> {
// Wait for active websockets to shutdown gracefully
let mut num_clients = GLOBALS.num_clients.load(Ordering::Relaxed);
if num_clients != 0 {
log::info!("Waiting for {num_clients} websockets to shutdown...");
log::info!(target: "Server", "Waiting for {num_clients} websockets to shutdown...");
// We will check if all clients have shutdown every 25ms
let interval = tokio::time::interval(Duration::from_millis(25));
@ -166,7 +177,7 @@ async fn main() -> Result<(), Error> {
}
}
log::info!("Syncing and shutting down.");
log::info!(target: "Server", "Syncing and shutting down.");
let _ = GLOBALS.store.get().unwrap().sync();
Ok(())
@ -190,12 +201,12 @@ async fn serve(stream: MaybeTlsStream<TcpStream>, peer_addr: SocketAddr) -> Resu
// do nothing
} else {
// Print in detail
log::error!("{}: {:?}", peer_addr, src);
log::error!(target: "Client", "{}: {:?}", peer_addr, src);
}
} else {
// Print in less detail
let e: Error = he.into();
log::error!("{}: {}", peer_addr, e);
log::error!(target: "Client", "{}: {}", peer_addr, e);
}
}
});
@ -277,7 +288,10 @@ async fn handle_http_request(
// Increment count of active websockets
let old_num_websockets = GLOBALS.num_clients.fetch_add(1, Ordering::SeqCst);
// we cheat somewhat and log these websocket open and close messages
// as server messages
log::info!(
target: "Server",
"{}: TOTAL={}, New Connection: {}",
peer,
old_num_websockets + 1,
@ -308,7 +322,7 @@ async fn handle_http_request(
msg = "Timed Out (with no subscriptions)";
}
_ => {
log::error!("{}: {}", peer, e);
log::error!(target: "Client", "{}: {}", peer, e);
bankind = Ban::ErrorExit;
msg = "Error Exited";
}
@ -321,7 +335,10 @@ async fn handle_http_request(
// Ban for the appropriate duration
let ban_seconds = Globals::ban(peer.ip(), bankind);
// we cheat somewhat and log these websocket open and close messages
// as server messages
log::info!(
target: "Server",
"{}: TOTAL={}, {}, ban={}s",
peer,
old_num_websockets - 1,
@ -330,7 +347,7 @@ async fn handle_http_request(
);
}
Err(e) => {
log::error!("{}: {}", peer, e);
log::error!(target: "Client", "{}: {}", peer, e);
}
}
});
@ -397,7 +414,7 @@ impl WebSocketService {
let message = message?;
if let Err(e) = self.handle_websocket_message(message).await {
if let Err(e) = self.websocket.close(None).await {
log::info!("{}: Err on websocket close: {e}", self.peer);
log::info!(target: "Client", "{}: Err on websocket close: {e}", self.peer);
}
return Err(e);
}
@ -456,16 +473,16 @@ impl WebSocketService {
async fn handle_websocket_message(&mut self, message: Message) -> Result<(), Error> {
match message {
Message::Text(msg) => {
log::trace!("{}: <= {}", self.peer, msg);
log::trace!(target: "Client", "{}: <= {}", self.peer, msg);
self.replied = false;
// This is defined in nostr.rs
if let Err(e) = self.handle_nostr_message(&msg).await {
self.error_punishment += e.inner.punishment();
log::error!("{}: {e}", self.peer);
log::error!(target: "Client", "{}: {e}", self.peer);
if msg.len() < 2048 {
log::error!("{}: msg was {}", self.peer, msg);
log::error!(target: "Client", "{}: msg was {}", self.peer, msg);
} else {
log::error!("{}: truncated msg was {} ...", self.peer, &msg[..2048]);
log::error!(target: "Client", "{}: truncated msg was {} ...", self.peer, &msg[..2048]);
}
if !self.replied {
let reply = NostrReply::Notice(format!("error: {}", e));
@ -483,7 +500,7 @@ impl WebSocketService {
"binary messages are not processed by this relay".to_owned(),
);
self.websocket.send(Message::text(reply.as_json())).await?;
log::info!(
log::info!(target: "Client",
"{}: Received unhandled binary message: {:02X?}",
self.peer,
msg
@ -491,22 +508,22 @@ impl WebSocketService {
}
Message::Ping(msg) => {
// No need to send a reply: tungstenite takes care of this for you.
log::debug!("{}: Received ping message: {:02X?}", self.peer, msg);
log::debug!(target: "Client", "{}: Received ping message: {:02X?}", self.peer, msg);
}
Message::Pong(msg) => {
log::debug!("{}: Received pong message: {:02X?}", self.peer, msg);
log::debug!(target: "Client", "{}: Received pong message: {:02X?}", self.peer, msg);
}
Message::Close(msg) => {
// No need to send a reply: tungstenite takes care of this for you.
if let Some(msg) = &msg {
log::debug!(
log::debug!(target: "Client",
"{}: Received close message with code {} and message: {}",
self.peer,
msg.code,
msg.reason
);
} else {
log::debug!("{}: Received close message", self.peer);
log::debug!(target: "Client", "{}: Received close message", self.peer);
}
}
Message::Frame(_msg) => {

View File

@ -33,7 +33,7 @@ impl WebSocketService {
} else if &input[inpos..inpos + 5] == b"AUTH\"" {
self.auth(msg, inpos + 5).await?;
} else {
log::warn!("{}: Received unhandled text message: {}", self.peer, msg);
log::warn!(target: "Client", "{}: Received unhandled text message: {}", self.peer, msg);
let reply = NostrReply::Notice("Command unrecognized".to_owned());
self.websocket.send(Message::text(reply.as_json())).await?;
self.replied = true;
@ -159,7 +159,7 @@ impl WebSocketService {
self.replied = true;
log::debug!(
log::debug!(target: "Client",
"{}: new subscription \"{subid}\", {} total",
self.peer,
self.subscriptions.len()
@ -199,11 +199,11 @@ impl WebSocketService {
"That event is deleted".to_string(),
),
ChorusError::EventIsInvalid(ref why) => {
log::error!("{}: {}", self.peer, e);
log::error!(target: "Client", "{}: {}", self.peer, e);
NostrReply::Ok(id, false, NostrReplyPrefix::Invalid, why.to_string())
}
ChorusError::Restricted => {
log::error!("{}: {}", self.peer, e);
log::error!(target: "Client", "{}: {}", self.peer, e);
NostrReply::Ok(
id,
false,

View File

@ -5,7 +5,7 @@ use hyper::{Body, Request, Response, StatusCode};
use std::net::SocketAddr;
pub async fn serve_http(peer: SocketAddr, request: Request<Body>) -> Result<Response<Body>, Error> {
log::debug!("{}: HTTP request for {}", peer, request.uri());
log::debug!(target: "Client", "{}: HTTP request for {}", peer, request.uri());
let response = Response::builder()
.header("Access-Control-Allow-Origin", "*")
.header("Access-Control-Allow-Headers", "*")
@ -16,7 +16,7 @@ pub async fn serve_http(peer: SocketAddr, request: Request<Body>) -> Result<Resp
}
pub async fn serve_nip11(peer: SocketAddr) -> Result<Response<Body>, Error> {
log::debug!("{}: sent NIP-11", peer);
log::debug!(target: "Client", "{}: sent NIP-11", peer);
let rid = {
let config = GLOBALS.config.get().unwrap();
GLOBALS.rid.get_or_init(|| build_rid(config))

View File

@ -1,6 +1,7 @@
use crate::error::Error;
use crate::types::Pubkey;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use url::Host;
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -22,6 +23,9 @@ pub struct FriendlyConfig {
pub max_subscriptions: usize,
pub serve_ephemeral: bool,
pub serve_relay_lists: bool,
pub server_log_level: String,
pub library_log_level: String,
pub client_log_level: String,
}
impl Default for FriendlyConfig {
@ -44,6 +48,9 @@ impl Default for FriendlyConfig {
max_subscriptions: 32,
serve_ephemeral: true,
serve_relay_lists: true,
server_log_level: "Info".to_string(),
library_log_level: "Warn".to_string(),
client_log_level: "Error".to_string(),
}
}
}
@ -68,6 +75,9 @@ impl FriendlyConfig {
max_subscriptions,
serve_ephemeral,
serve_relay_lists,
server_log_level,
library_log_level,
client_log_level,
} = self;
let mut public_key: Option<Pubkey> = None;
@ -82,6 +92,13 @@ impl FriendlyConfig {
let hostname = Host::parse(&hostname)?;
let server_log_level =
log::LevelFilter::from_str(&server_log_level).unwrap_or(log::LevelFilter::Error);
let library_log_level =
log::LevelFilter::from_str(&library_log_level).unwrap_or(log::LevelFilter::Warn);
let client_log_level =
log::LevelFilter::from_str(&client_log_level).unwrap_or(log::LevelFilter::Info);
Ok(Config {
data_directory,
ip_address,
@ -101,6 +118,9 @@ impl FriendlyConfig {
max_subscriptions,
serve_ephemeral,
serve_relay_lists,
server_log_level,
library_log_level,
client_log_level,
})
}
}
@ -125,4 +145,7 @@ pub struct Config {
pub max_subscriptions: usize,
pub serve_ephemeral: bool,
pub serve_relay_lists: bool,
pub server_log_level: log::LevelFilter,
pub library_log_level: log::LevelFilter,
pub client_log_level: log::LevelFilter,
}

View File

@ -1,27 +1,190 @@
# This is a config file for the Chorus nostr relay
# Refer to https://github.com/mikedilger/chorus
# This is the directory where chorus stores data.
#
# Default is "/tmp".
#
# If deployed according to docs/DEPLOYING.md, is "/opt/chorus/var/chorus".
#
data_directory = "/opt/chorus/var/chorus"
# This is the IP address that chorus listens on. If deployed directly on the
# Internet, this should be an Internet globally accessible IP address.
# If proxied or if testing locally, this can be a localhost address.
#
# Default is "127.0.0.1".
#
ip_address = "127.0.0.1"
# This is the port that chorus listens on. If deployed directly on the Internet,
# this should probably be 443 which is the expected default port for the
# "wss://" protocol.
#
# Default is 443.
#
port = 443
# This is the DNS hostname of your relay.
# This is used for verifying AUTH events, which specify your relay host name.
#
hostname = "localhost"
# If true, chorus will handle TLS, running over HTTPS. If false, chorus runs over HTTP.
#
# If you are proxying via nginx, normally you will set this to false and allow nginx to handle TLS.
#
use_tls = true
# This is the path to your TLS certificate chain file.
#
# If use_tls is false, this value is irrelevant.
#
# Default is "./tls/fullchain.pem".
#
# If deployed according to docs/DEPLOYING.md using the direct method, this is set to
# "/opt/chorus/etc/tls/fullchain.pem" and the systemd service copies letsencrypt TLS
# certificates into this position on start.
#
certchain_pem_path = "/opt/chorus/etc/tls/fullchain.pem"
# This is the path to your TLS private key file.
#
# If use_tls is false, this value is irrelevant.
#
# Default is "./tls/privkey.pem".
#
# If deployed according to docs/DEPLOYING.md using the direct method, this is set to
# "/opt/chorus/etc/tls/privkey.pem" and the systemd service copies letsencrypt TLS
# certificates into this position on start.
#
key_pem_path = "/opt/chorus/etc/tls/privkey.pem"
name = "Chorus Default"
description = "A default config of the Chorus relay"
# This is an optional name for your relay, displayed in the NIP-11 response.
#
# Default is not set
#
# name = "Chorus Default"
# This is an optional description for your relay, displayed in the NIP-11 response.
#
# Default is not set
#
# description = "A default config of the Chorus relay"
# This is an optional contact for your relay, displayed in the NIP-11 response.
#
# Default is not set
#
# contact =
# This is an optional public key (hex format) for your relay, displayed in the NIP-11 response.
#
# Default is not set
#
# public_key_hex =
# These are the public keys (hex format) of your relay's authorized users. See BEHAVIOR.md
# to understand how chorus uses these.
#
# Default is []
#
user_hex_keys = []
# This is a boolean indicating whether or not chorus verifies incoming events.
#
# This setting only skips verification of events that are submitted by AUTHed and
# authorized users. Chorus always verifies incoming AUTH events, and any event that
# is not submitted by an AUTHed and authorized relay user.
#
# Default is true.
#
verify_events = true
# This is a bad idea in production, but useful for testing or for dumping
# your entire relay
# This is a boolean indicating whether or not scraping is allowed.
# Scraping is any filter that does not match one of the following conditions:
#
# A non-empty id list is set
# A non-empty authors list is set and a non-empty kinds list is set
# A non-empty authors list is set and at least one tag is set.
# A non-empty kinds list is set and at least one tag is set.
# Has a limit <= 10
#
# Filters that fail to match these conditions will be rejected if allow_scraping is false.
#
# If allow_scraping is true, be aware that filters that don't match any of these conditions
# have no indexes to speed up their query, so they scan through every single event on the relay.
#
# This setting is intended for temporary use, allowing you to dump every single
# event on your relay; the `dump` binary automatically enables it while it runs.
#
# Default is false.
#
allow_scraping = false
# This is an integer indicating the maximum number of subscriptions a connection can have open
# at a given time.
#
# If you set this too low, clients will be incentivised to resubmit updated subscriptions which
# will pull down the same events over again, instead of submitting a new subscription that only
# gets the additional events that the client wants. It may seem intuitive that setting this to
# a low value like 10 will decrease server load, but it will probably increase server load.
#
# It is strongly recommended to not go below 16.
#
# Default is 32.
#
max_subscriptions = 32
# Whether or not to accept and serve all ephemeral events to everybody.
#
# Default is true.
#
serve_ephemeral = true
# Whether or not to accept and serve kind 10002 Relay List Metadata (NIP-65) events to everybody.
#
# Default is true.
#
serve_relay_lists = true
# How verbose to log issues with the main server code.
#
# Possible values are: Trace, Debug, Info, Warn, Error
#
# Default is Info
#
server_log_level = "Info"
# How verbose to log library issues and other general issues
#
# Possible values are: Trace, Debug, Info, Warn, Error
#
# Default is Warn
#
library_log_level = "Warn"
# How verbose to log issues with client requests
#
# Possible values are: Trace, Debug, Info, Warn, Error
#
# Default is Error
#
client_log_level = "Error"

View File

@ -123,7 +123,7 @@ Default is false.
### max_subscriptions
This is a usize indicating the maximum number of subscriptions a connection can have open at a given time.
This is an integer indicating the maximum number of subscriptions a connection can have open at a given time.
If you set this too low, clients will be incentivised to resubmit updated subscriptions which will pull down the same events over again, instead of submitting a new subscription that only gets the additional events that the client wants. It may seem intuitive that setting this to a low value like 10 will decrease server load, but it will probably increase server load.
@ -133,12 +133,37 @@ Default is 32.
### serve_ephemeral
Accept and serve all ephemeral events to everybody.
Whether or not to accept and serve all ephemeral events to everybody.
Default is true.
### serve_relay_lists
Accept and serve kind 10002 events to everybody.
Whether or not to accept and serve kind 10002 Relay List Metadata (NIP-65) events to everybody.
Default is true.
### server_log_level
How verbose to log issues with the main server code.
Possible values are: Trace, Debug, Info, Warn, Error
Default is Info
### library_log_level
How verbose to log library issues and other general issues
Possible values are: Trace, Debug, Info, Warn, Error
Default is Warn
### client_log_level
How verbose to log issues with client requests
Possible values are: Trace, Debug, Info, Warn, Error
Default is Error

View File

@ -1,6 +1,4 @@
#!/bin/bash
export RUST_LOG='info,chorus=debug'
cargo build --release && \
./target/release/chorus ./sample/sample.config.toml

View File

@ -1,29 +1,22 @@
data_directory = "./sample"
# See contrib/chorus.toml for a documented config file
data_directory = "./sample"
ip_address = "127.0.0.1"
port = 8080
hostname = "localhost"
use_tls = false
certchain_pem_path = "tls/fullchain.pem"
key_pem_path = "tls/privkey.pem"
name = "Chorus Sample"
description = "A sample run of the Chorus relay"
#contact =
#public_key_hex =
user_hex_keys = [
"ee11a5dff40c19a555f41fe42b48f00e618c91225622ae37b6c2bb67b76c4e49"
]
verify_events = true
# This is a bad idea in production, but useful for testing or for dumping
# your entire relay
allow_scraping = false
max_subscriptions = 32
serve_ephemeral = true
serve_relay_lists = true
server_log_level = "Info"
library_log_level = "Warn"
client_log_level = "Error"