Refactor, again

This commit is contained in:
Raphaël Thériault 2020-08-02 15:56:39 -04:00
parent 013c3f5ca8
commit b8484b2a9a
14 changed files with 693 additions and 983 deletions

4
.gitignore vendored
View File

@ -1,9 +1,7 @@
target/
.env
.vscode/
.idea/
filite.toml
filite.json
.env

923
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -3,7 +3,7 @@ name = "filite"
version = "0.3.0"
authors = ["Raphaël Thériault <raphael_theriault@outlook.com>"]
edition = "2018"
description = "A modular and standalone pastebin, URL shortener and file-sharing service"
description = "A flexible and standalone pastebin, URL shortener and file-sharing service"
homepage = "https://github.com/raftario/filite"
repository = "https://github.com/raftario/filite"
readme = "README.md"
@ -15,58 +15,55 @@ keywords = [
license = "MIT"
[dependencies]
anyhow = "1.0.31"
anyhow = "1.0.32"
askama = "0.10.3"
cfg-if = "0.1.10"
chrono = "0.4.11"
futures-core = "0.3.5"
futures-util = "0.3.5"
log = { version = "0.4.8", features = ["serde"] }
chrono = { version = "0.4.13", features = ["serde"] }
futures = "0.3.5"
rust-argon2 = "0.8.2"
serde = { version = "1.0.114", features = ["derive"] }
serde_json = "1.0.55"
sqlx = { version = "0.3.5", features = ["chrono", "macros", "runtime-tokio"], default-features = false }
serde_json = "1.0.57"
sqlx = { version = "0.4.0-beta.1", features = ["any", "chrono", "macros", "migrate", "runtime-tokio"], default-features = false }
structopt = "0.3.15"
tokio = { version = "0.2.21", features = ["blocking", "fs", "rt-core"] }
warp = { version = "0.2.3", features = ["multipart"], default-features = false }
tokio = { version = "0.2.22", features = ["blocking", "fs", "rt-core"] }
tracing = "0.1.18"
tracing-futures = "0.2.4"
tracing-subscriber = "0.2.10"
warp = { version = "0.2.4", default-features = false }
# Makes cross-compiling easier by statically linking to OpenSSL
[target.'cfg(not(any(target_os = "windows", target_os = "macos", target_os = "ios")))'.dependencies]
openssl-sys = { version = "0.9", features = ["vendored"] }
[target.'cfg(not(any(target_os = "windows", target_os = "macos")))'.dependencies]
openssl = { version = "*", features = ["vendored"] }
[features]
default = ["full"]
lite = ["sqlite"]
classic = [
"sqlite",
"threaded",
"highlight",
]
# Presets
lite = ["filite", "sqlite"]
full = [
"filite",
"sqlite",
"postgres",
"mysql",
"tls",
"threaded",
"analytics",
"highlight",
]
# Data types
fi = []
li = []
te = []
filite = ["fi", "li", "te"]
# Database backends
sqlite = ["sqlx/sqlite"]
postgres = ["sqlx/postgres"]
mysql = ["sqlx/mysql"]
# TLS support for the database and web server
tls = [
"sqlx/tls",
"warp/tls",
]
# TLS support
tls = ["warp/tls"]
# Threaded runtime
threaded = ["tokio/rt-threaded"]
# Various analytics (no external services)
# Track views
analytics = []
# Syntax highlighting
highlight = []

View File

@ -2,7 +2,7 @@
> The README isn't representative of the current status of the `next` branch and will only be updated once the changes are stabilised.
A modular pastebin, URL shortener and file-sharing service that hosts **fi**les, redirects **li**nks and stores **te**xts.
A flexible and standalone pastebin, URL shortener and file-sharing service that hosts **fi**les, redirects **li**nks and stores **te**xts.
[![GitHub Actions](https://github.com/raftario/filite/workflows/Build/badge.svg)](https://github.com/raftario/filite/actions?workflowID=Build)
[![Crates.io](https://img.shields.io/crates/v/filite.svg)](https://crates.io/crates/filite)

View File

@ -0,0 +1,5 @@
-- Account table: one row per registered user.
CREATE TABLE users (
    -- Username; doubles as the primary key.
    user varchar(32) PRIMARY KEY,
    -- Encoded password hash.
    password varchar(256) NOT NULL,
    -- Role discriminant (presumably 0 = user, 1 = admin — TODO confirm against application code).
    role smallint NOT NULL
);

View File

@ -0,0 +1,11 @@
-- Main content table: files, links and texts share one table,
-- discriminated by `ty`.
CREATE TABLE filite (
    id varchar(32) PRIMARY KEY,
    -- Entry type discriminant.
    ty smallint NOT NULL,
    -- Payload; meaning depends on `ty` (presumably path, URL or text — TODO confirm).
    val text NOT NULL,
    -- User who created the entry.
    creator varchar(32) NOT NULL REFERENCES users(user),
    created timestamp NOT NULL,
    -- Access-level discriminant.
    visibility smallint NOT NULL,
    -- View counter.
    views int NOT NULL DEFAULT 0
);

View File

@ -1,5 +1,5 @@
use crate::utils::DefaultExt;
use anyhow::Error;
use log::LevelFilter;
use serde::{Deserialize, Serialize};
use std::{
fs::File,
@ -7,141 +7,103 @@ use std::{
path::{Path, PathBuf},
};
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
fn log_level_is_info(level: &str) -> bool {
level.to_lowercase() == "info"
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(default, rename_all = "kebab-case")]
pub struct Config {
pub port: u16,
pub database_url: String,
#[serde(default)]
pub logger: LoggerConfig,
#[serde(skip_serializing_if = "log_level_is_info")]
pub log_level: String,
#[serde(default)]
#[serde(skip_serializing_if = "DefaultExt::is_default")]
pub pool: PoolConfig,
#[cfg(feature = "tls")]
#[serde(default)]
pub tls: TlsConfig,
#[serde(skip_serializing_if = "Option::is_none")]
pub tls: Option<TlsConfig>,
#[cfg(feature = "threaded")]
#[serde(default)]
#[serde(skip_serializing_if = "DefaultExt::is_default")]
pub threads: ThreadsConfig,
#[cfg(feature = "analytics")]
#[serde(default)]
pub analytics: AnalyticsConfig,
#[cfg(feature = "highlight")]
#[serde(default)]
pub highlight: HighlightConfig,
}
impl Config {
pub fn read(path: impl AsRef<Path>) -> Result<Self, Error> {
let file = File::open(path)?;
let config = serde_json::from_reader(BufReader::new(file))?;
Ok(config)
}
pub fn write(path: impl AsRef<Path>) -> Result<(), Error> {
let config = Self {
impl Default for Config {
fn default() -> Self {
Self {
port: 80,
database_url: {
cfg_if::cfg_if! {
if #[cfg(feature = "sqlite")] {
"filite.db"
"sqlite://filite.db"
} else if #[cfg(feature = "postgres")] {
"postgresql://localhost:5432/filite"
} else if #[cfg(feature = "mysql")] {
"mysql://localhost:3306/filite"
}
}
}
.to_owned(),
logger: Default::default(),
pool: Default::default(),
log_level: "info".to_owned(),
#[cfg(feature = "tls")]
tls: Default::default(),
tls: None,
pool: Default::default(),
#[cfg(feature = "threaded")]
threads: Default::default(),
#[cfg(feature = "analytics")]
analytics: Default::default(),
#[cfg(feature = "highlight")]
highlight: Default::default(),
};
let file = File::create(path)?;
serde_json::to_writer_pretty(BufWriter::new(file), &config)?;
Ok(())
}
}
#[derive(Deserialize, Serialize, Default)]
#[serde(default, rename_all = "camelCase")]
pub struct LoggerConfig {
pub console: LogLevel,
pub file: Option<FileLoggerConfig>,
}
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct FileLoggerConfig {
#[serde(default)]
pub level: LogLevel,
pub path: PathBuf,
}
#[derive(Deserialize, Serialize)]
pub struct LogLevel(pub LevelFilter);
impl Default for LogLevel {
fn default() -> Self {
Self(LevelFilter::Info)
}
}
#[derive(Deserialize, Serialize, Default)]
#[serde(default, rename_all = "camelCase")]
pub struct PoolConfig {
pub min_size: Option<u32>,
pub max_size: Option<u32>,
pub connect_timeout: Option<u64>,
pub idle_timeout: Option<u64>,
pub max_lifetime: Option<u64>,
}
#[cfg(feature = "tls")]
#[derive(Deserialize, Serialize, Default)]
#[serde(default, rename_all = "camelCase")]
pub struct TlsConfig {
pub cert: Option<PathBuf>,
pub key: Option<PathBuf>,
}
#[cfg(feature = "threaded")]
#[derive(Deserialize, Serialize, Default)]
#[serde(default, rename_all = "camelCase")]
pub struct ThreadsConfig {
pub core_threads: Option<usize>,
pub max_threads: Option<usize>,
}
#[cfg(feature = "analytics")]
#[derive(Deserialize, Serialize, Default)]
#[serde(default)]
pub struct AnalyticsConfig {
pub views: bool,
}
#[cfg(feature = "highlight")]
#[derive(Deserialize, Serialize)]
#[serde(default, rename_all = "camelCase")]
pub struct HighlightConfig {
pub theme: String,
pub languages: Vec<String>,
}
#[cfg(feature = "highlight")]
impl Default for HighlightConfig {
fn default() -> Self {
Self {
theme: "default".to_owned(),
languages: Default::default(),
}
}
}
pub fn read(path: impl AsRef<Path>) -> Result<&'static Config, Error> {
let file = File::open(path)?;
let config: Config = serde_json::from_reader(BufReader::new(file))?;
Ok(&*Box::leak(Box::new(config)))
}
pub fn write(path: impl AsRef<Path>) -> Result<(), Error> {
let config: Config = Default::default();
let file = File::create(path)?;
serde_json::to_writer_pretty(BufWriter::new(file), &config)?;
Ok(())
}
#[cfg(feature = "tls")]
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct TlsConfig {
pub cert: PathBuf,
pub key: PathBuf,
}
#[derive(Debug, Deserialize, Serialize, Default, PartialEq)]
#[serde(default, rename_all = "kebab-case")]
pub struct PoolConfig {
#[serde(skip_serializing_if = "Option::is_none")]
pub max_connections: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub min_connections: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub connect_timeout: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub idle_timeout: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub max_lifetime: Option<u64>,
}
#[cfg(feature = "threaded")]
#[derive(Debug, Deserialize, Serialize, Default, PartialEq)]
#[serde(default, rename_all = "kebab-case")]
pub struct ThreadsConfig {
#[serde(skip_serializing_if = "Option::is_none")]
pub core_threads: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub max_threads: Option<usize>,
}

View File

@ -1,111 +1 @@
pub mod models;
pub mod pool;
use anyhow::Error;
use futures_core::Stream;
use futures_util::StreamExt;
use models::{Filite, FiliteRow, User, UserRow};
use pool::Pool;
use std::{convert::TryInto, pin::Pin};
/// Fetches a single entry from the `data` table by its id.
///
/// Dispatches to the backend-specific sqlx pool, then converts the raw
/// row into a typed [`Filite`]. Returns `Ok(None)` when no row matches.
pub async fn fetch(id: &str, pool: &Pool) -> Result<Option<Filite>, Error> {
    // NOTE(review): `?` is the SQLite/MySQL placeholder; Postgres expects `$1` — verify
    // this query actually works on the Postgres backend.
    let sql = "SELECT * FROM data WHERE id = ?";
    let row: Option<FiliteRow> = match pool {
        #[cfg(feature = "sqlite")]
        Pool::Sqlite(p) => {
            use sqlx::sqlite::SqliteQueryAs;
            sqlx::query_as(sql).bind(id).fetch_optional(p).await?
        }
        #[cfg(feature = "postgres")]
        Pool::Postgres(p) => {
            use sqlx::postgres::PgQueryAs;
            sqlx::query_as(sql).bind(id).fetch_optional(p).await?
        }
        #[cfg(feature = "mysql")]
        Pool::MySql(p) => {
            use sqlx::mysql::MySqlQueryAs;
            sqlx::query_as(sql).bind(id).fetch_optional(p).await?
        }
    };
    // Convert the backend-agnostic row into the typed model, propagating
    // conversion failures (unknown type/visibility discriminants).
    let filite: Option<Filite> = match row {
        Some(row) => Some(row.try_into()?),
        None => None,
    };
    Ok(filite)
}
/// Streams every entry in the `data` table.
///
/// Rows are decoded lazily; each stream item is either a typed [`Filite`]
/// or the error produced while fetching/converting that particular row.
pub fn fetch_all<'a>(pool: &'a Pool) -> impl Stream<Item = Result<Filite, Error>> + 'a {
    let sql = "SELECT * FROM data";
    let rows: Pin<Box<dyn Stream<Item = Result<FiliteRow, sqlx::Error>>>> = match pool {
        #[cfg(feature = "sqlite")]
        Pool::Sqlite(p) => {
            use sqlx::sqlite::SqliteQueryAs;
            sqlx::query_as(sql).fetch(p)
        }
        #[cfg(feature = "postgres")]
        Pool::Postgres(p) => {
            use sqlx::postgres::PgQueryAs;
            sqlx::query_as(sql).fetch(p)
        }
        #[cfg(feature = "mysql")]
        Pool::MySql(p) => {
            use sqlx::mysql::MySqlQueryAs;
            sqlx::query_as(sql).fetch(p)
        }
    };
    // Fold sqlx errors and row-conversion errors into a single error type.
    rows.map(|r| match r {
        Ok(r) => r.try_into(),
        Err(e) => Err(e.into()),
    })
}
/// Fetches a single user by username.
///
/// Dispatches to the backend-specific sqlx pool and converts the raw row
/// into a typed [`User`]. Returns `Ok(None)` when no row matches.
pub async fn fetch_user(username: &str, pool: &Pool) -> Result<Option<User>, Error> {
    // NOTE(review): `?` is the SQLite/MySQL placeholder; Postgres expects `$1` — verify.
    let sql = "SELECT * FROM users WHERE username = ?";
    let row: Option<UserRow> = match pool {
        #[cfg(feature = "sqlite")]
        Pool::Sqlite(p) => {
            use sqlx::sqlite::SqliteQueryAs;
            sqlx::query_as(sql).bind(username).fetch_optional(p).await?
        }
        #[cfg(feature = "postgres")]
        Pool::Postgres(p) => {
            use sqlx::postgres::PgQueryAs;
            sqlx::query_as(sql).bind(username).fetch_optional(p).await?
        }
        #[cfg(feature = "mysql")]
        Pool::MySql(p) => {
            use sqlx::mysql::MySqlQueryAs;
            sqlx::query_as(sql).bind(username).fetch_optional(p).await?
        }
    };
    // Propagate conversion failures (unknown role discriminant).
    let user: Option<User> = match row {
        Some(row) => Some(row.try_into()?),
        None => None,
    };
    Ok(user)
}
/// Streams every row of the `users` table.
///
/// Rows are decoded lazily; each stream item is either a typed [`User`]
/// or the error produced while fetching/converting that particular row.
pub fn fetch_all_users<'a>(pool: &'a Pool) -> impl Stream<Item = Result<User, Error>> + 'a {
    let sql = "SELECT * FROM users";
    let rows: Pin<Box<dyn Stream<Item = Result<UserRow, sqlx::Error>>>> = match pool {
        #[cfg(feature = "sqlite")]
        Pool::Sqlite(p) => {
            use sqlx::sqlite::SqliteQueryAs;
            sqlx::query_as(sql).fetch(p)
        }
        #[cfg(feature = "postgres")]
        Pool::Postgres(p) => {
            use sqlx::postgres::PgQueryAs;
            sqlx::query_as(sql).fetch(p)
        }
        #[cfg(feature = "mysql")]
        Pool::MySql(p) => {
            use sqlx::mysql::MySqlQueryAs;
            sqlx::query_as(sql).fetch(p)
        }
    };
    // Fold sqlx errors and row-conversion errors into a single error type.
    rows.map(|r| match r {
        Ok(r) => r.try_into(),
        Err(e) => Err(e.into()),
    })
}

View File

@ -1,152 +0,0 @@
use anyhow::{anyhow, Error};
// use chrono::{DateTime, Utc};
use std::{
convert::{TryFrom, TryInto},
path::PathBuf,
};
use tokio::task;
/// Raw database row for the `data` table, prior to conversion into the
/// typed [`Filite`] model.
#[derive(sqlx::FromRow)]
pub struct FiliteRow {
    id: String,
    // Variant discriminant: 0 = file, 1 = link, 2 = text
    // (see `TryFrom<FiliteRow> for Filite`).
    ty: i32,
    // Meaning depends on `ty`: a filesystem path, a URL, or text contents.
    val: String,
    creator: String,
    // created: DateTime<Utc>,
    // Access-level discriminant (see `TryFrom<i32> for Visibility`).
    visibility: i32,
    views: i32,
}
/// Typed model of a stored entry: a hosted file, a shortened link,
/// or a stored text.
pub enum Filite {
    /// A hosted file, located on disk at `path`.
    File {
        id: String,
        path: PathBuf,
        creator: String,
        // created: DateTime<Utc>,
        visibility: Visibility,
        #[cfg(feature = "analytics")]
        views: i32,
    },
    /// A redirect to an external URL.
    Link {
        id: String,
        url: String,
        creator: String,
        // created: DateTime<Utc>,
        visibility: Visibility,
        #[cfg(feature = "analytics")]
        views: i32,
    },
    /// A stored text paste.
    Text {
        id: String,
        contents: String,
        creator: String,
        // created: DateTime<Utc>,
        visibility: Visibility,
        #[cfg(feature = "analytics")]
        views: i32,
    },
}
/// Access level of an entry; stored in the database as an integer
/// (0 = public, 1 = internal, 2 = private).
pub enum Visibility {
    Public,
    Internal,
    Private,
}
impl TryFrom<FiliteRow> for Filite {
    type Error = Error;

    /// Converts a raw database row into the typed model.
    ///
    /// `row.ty` selects the variant (0 = file, 1 = link, 2 = text); any
    /// other discriminant is an error, as is an unknown visibility value.
    fn try_from(row: FiliteRow) -> Result<Self, Self::Error> {
        match row.ty {
            0 => Ok(Filite::File {
                id: row.id,
                path: PathBuf::from(row.val),
                creator: row.creator,
                // created: row.created,
                visibility: row.visibility.try_into()?,
                #[cfg(feature = "analytics")]
                views: row.views,
            }),
            1 => Ok(Filite::Link {
                id: row.id,
                url: row.val,
                creator: row.creator,
                // created: row.created,
                visibility: row.visibility.try_into()?,
                #[cfg(feature = "analytics")]
                views: row.views,
            }),
            2 => Ok(Filite::Text {
                id: row.id,
                contents: row.val,
                creator: row.creator,
                // created: row.created,
                visibility: row.visibility.try_into()?,
                #[cfg(feature = "analytics")]
                views: row.views,
            }),
            ty => Err(anyhow!("unknown type {}", ty)),
        }
    }
}
impl TryFrom<i32> for Visibility {
    type Error = Error;

    /// Maps the integer stored in the database to a [`Visibility`] variant;
    /// any value outside 0..=2 is rejected.
    fn try_from(value: i32) -> Result<Self, Self::Error> {
        let visibility = match value {
            0 => Visibility::Public,
            1 => Visibility::Internal,
            2 => Visibility::Private,
            other => return Err(anyhow!("unknown visibility {}", other)),
        };
        Ok(visibility)
    }
}
/// Raw database row for the `users` table, prior to conversion into the
/// typed [`User`] model.
#[derive(sqlx::FromRow)]
pub struct UserRow {
    username: String,
    // Encoded password hash (verified in `User::verify_password`).
    password: String,
    // Role discriminant: 0 = user, 1 = admin (see `TryFrom<i32> for Role`).
    role: i32,
    // registered: DateTime<Utc>,
}
/// Typed model of a registered user.
pub struct User {
    pub username: String,
    /// Encoded password hash, checked via [`User::verify_password`].
    pub password: String,
    pub role: Role,
    // pub registered: DateTime<Utc>,
}
/// Authorization level of a user; stored in the database as an integer
/// (0 = user, 1 = admin).
pub enum Role {
    User,
    Admin,
}
impl TryFrom<UserRow> for User {
    type Error = Error;

    /// Converts a raw `users` row into the typed model, failing on an
    /// unknown role discriminant.
    fn try_from(value: UserRow) -> Result<Self, Self::Error> {
        Ok(User {
            username: value.username,
            password: value.password,
            role: value.role.try_into()?,
        })
    }
}
impl TryFrom<i32> for Role {
    type Error = Error;

    /// Maps the integer stored in the database to a [`Role`] variant;
    /// any value other than 0 or 1 is rejected.
    fn try_from(value: i32) -> Result<Self, Self::Error> {
        let role = match value {
            0 => Role::User,
            1 => Role::Admin,
            other => return Err(anyhow!("unknown role {}", other)),
        };
        Ok(role)
    }
}
impl User {
    /// Verifies a plaintext password against the stored encoded hash.
    ///
    /// The check is offloaded to Tokio's blocking thread pool because
    /// argon2 verification is CPU-intensive; the hash and password are
    /// cloned into the spawned task so no borrows cross the await point.
    pub async fn verify_password(&self, password: &str) -> Result<bool, Error> {
        let encoded = self.password.clone();
        let pwd = password.as_bytes().to_vec();
        Ok(task::spawn_blocking(move || argon2::verify_encoded(&encoded, &pwd)).await??)
    }
}

View File

@ -1,65 +1,32 @@
use crate::config::{Config, PoolConfig};
use crate::config::Config;
use anyhow::Error;
use sqlx::{pool::Builder, Connect};
use sqlx::any::{AnyPool, AnyPoolOptions};
use std::time::Duration;
pub enum Pool {
#[cfg(feature = "sqlite")]
Sqlite(sqlx::SqlitePool),
#[cfg(feature = "postgres")]
Postgres(sqlx::PgPool),
#[cfg(feature = "mysql")]
MySql(sqlx::MySqlPool),
}
#[tracing::instrument(level = "debug")]
pub async fn build(config: &Config) -> Result<&'static AnyPool, Error> {
let mut options: AnyPoolOptions = Default::default();
impl Pool {
pub async fn build(config: &Config) -> Result<Self, Error> {
if config.database_url.starts_with("postgresql://") {
cfg_if::cfg_if! {
if #[cfg(feature = "postgres")] {
let pool = Self::apply_config(sqlx::PgPool::builder(), &config.pool);
Ok(Self::Postgres(pool.build(&config.database_url).await?))
} else {
Err(anyhow::anyhow!("This build of filite doesn't support PostgreSQL"))
}
}
} else if config.database_url.starts_with("mysql://") {
cfg_if::cfg_if! {
if #[cfg(feature = "mysql")] {
let pool = Self::apply_config(sqlx::MySqlPool::builder(), &config.pool);
Ok(Self::MySql(pool.build(&config.database_url).await?))
} else {
Err(anyhow::anyhow!("This build of filite doesn't support MySQL"))
}
}
} else {
cfg_if::cfg_if! {
if #[cfg(feature = "sqlite")] {
let pool = Self::apply_config(sqlx::SqlitePool::builder(), &config.pool);
Ok(Self::Sqlite(pool.build(&config.database_url).await?))
} else {
Err(anyhow::anyhow!("This build of filite doesn't support SQLite"))
}
}
}
if let Some(ms) = config.pool.max_connections {
options = options.max_connections(ms);
}
if let Some(ms) = config.pool.min_connections {
options = options.min_connections(ms);
}
fn apply_config<C: Connect>(mut builder: Builder<C>, config: &PoolConfig) -> Builder<C> {
if let Some(ms) = config.max_size {
builder = builder.max_size(ms);
}
if let Some(ms) = config.min_size {
builder = builder.min_size(ms);
}
if let Some(ct) = config.connect_timeout {
builder = builder.connect_timeout(Duration::from_millis(ct));
}
if let Some(it) = config.idle_timeout {
builder = builder.idle_timeout(Duration::from_millis(it));
}
if let Some(ml) = config.max_lifetime {
builder = builder.max_lifetime(Duration::from_millis(ml));
}
builder
if let Some(ct) = config.pool.connect_timeout {
options = options.connect_timeout(Duration::from_millis(ct));
}
if let Some(it) = config.pool.idle_timeout {
options = options.idle_timeout(Duration::from_millis(it));
}
if let Some(ml) = config.pool.max_lifetime {
options = options.max_lifetime(Duration::from_millis(ml));
}
let pool = options.connect(&config.database_url).await?;
sqlx::migrate!("./migrations").run(&pool).await?;
Ok(&*Box::leak(Box::new(pool)))
}

View File

@ -1,88 +0,0 @@
use crate::config::LoggerConfig;
use anyhow::Error;
use log::{self, Level, LevelFilter, Log, Metadata, Record};
use std::{
fs::{File, OpenOptions},
io::{self, BufWriter, Stderr, Stdout, Write},
sync::Mutex,
};
/// Installs the global logger described by `config`.
///
/// The console sink writes warnings and errors to stderr and everything
/// else to stdout; an optional buffered file sink appends to the
/// configured path (created if missing).
///
/// # Errors
///
/// Fails if the log file cannot be opened or if a global logger has
/// already been installed.
pub fn init(config: &LoggerConfig) -> Result<(), Error> {
    let logger = Logger {
        console: (
            config.console.0,
            Mutex::new(io::stderr()),
            Mutex::new(io::stdout()),
        ),
        file: match &config.file {
            Some(fc) => Some((
                fc.level.0,
                Mutex::new(BufWriter::new(
                    OpenOptions::new()
                        .append(true)
                        .create(true)
                        .open(&fc.path)?,
                )),
            )),
            None => None,
        },
    };
    // BUG FIX: the `log` crate's global max level defaults to `Off`, so
    // without raising it the logging macros short-circuit and the logger
    // installed below never receives a single record. Use the most
    // verbose level any sink accepts.
    let max_level = logger
        .file
        .as_ref()
        .map_or(logger.console.0, |(lf, _)| logger.console.0.max(*lf));
    log::set_boxed_logger(Box::new(logger))?;
    log::set_max_level(max_level);
    Ok(())
}
/// Global logger with a console sink (stderr for warnings/errors, stdout
/// otherwise) and an optional buffered file sink, each gated by its own
/// level filter. Writers are wrapped in `Mutex` because `Log` requires
/// `&self` access from any thread.
struct Logger {
    console: (LevelFilter, Mutex<Stderr>, Mutex<Stdout>),
    file: Option<(LevelFilter, Mutex<BufWriter<File>>)>,
}
impl Log for Logger {
    /// A record is enabled if either the console sink or the optional
    /// file sink admits its level.
    fn enabled(&self, metadata: &Metadata) -> bool {
        metadata.level() <= self.console.0
            || self
                .file
                .as_ref()
                .map_or(false, |(lf, ..)| metadata.level() <= *lf)
    }

    /// Writes the record to every sink whose level filter admits it.
    fn log(&self, record: &Record) {
        let target = record.target();
        let level = record.level();
        let args = record.args();
        // File sink first; write failures are deliberately swallowed
        // (`.ok()`) so logging can never crash the application.
        if let Some((lf, bw)) = &self.file {
            if level <= *lf {
                bw.lock()
                    .unwrap()
                    .write_fmt(format_args!("[{}]::[{}] {}\n", target, level, args))
                    .ok();
            }
        }
        if level <= self.console.0 {
            if level <= Level::Warn {
                // Warn and Error go to stderr (tuple field 1).
                self.console
                    .1
                    .lock()
                    .unwrap()
                    .write_fmt(format_args!("[{}]::[{}] {}\n", target, level, args))
                    .ok();
            } else {
                // Info and below go to stdout (tuple field 2).
                self.console
                    .2
                    .lock()
                    .unwrap()
                    .write_fmt(format_args!("[{}]::[{}] {}\n", target, level, args))
                    .ok();
            }
        }
    }

    /// Flushes all sinks, ignoring failures.
    fn flush(&self) {
        self.console.1.lock().unwrap().flush().ok();
        self.console.2.lock().unwrap().flush().ok();
        if let Some((_, bw)) = &self.file {
            bw.lock().unwrap().flush().ok();
        }
    }
}

View File

@ -1,16 +1,18 @@
#[cfg(not(any(feature = "fi", feature = "li", feature = "te")))]
compile_error!("You need to select at least one data type");
#[cfg(not(any(feature = "sqlite", feature = "postgres", feature = "mysql")))]
compile_error!("You need to select at least one database backend");
mod config;
mod db;
mod logger;
mod runtime;
mod utils;
use anyhow::Error;
use config::Config;
use db::pool::Pool;
use std::path::PathBuf;
use structopt::StructOpt;
use tracing_subscriber::fmt::format::FmtSpan;
#[derive(StructOpt)]
#[structopt(author, about)]
@ -29,37 +31,21 @@ struct Opt {
#[derive(StructOpt)]
enum Command {
/// Initialises the configuration file with default values
InitConfig {
/// File to write
///
/// If unspecified, will write to a filite.json
/// file in the current working directory.
#[structopt(name = "FILE")]
path: Option<PathBuf>,
},
/// Initialises the database tables
InitDatabase {
/// Database connection URL
#[structopt(name = "URL")]
url: String,
},
Init,
}
fn main() -> Result<(), Error> {
let args: Opt = Opt::from_args();
match &args.command {
Some(Command::InitConfig { path }) => {
return init_config(match path {
Some(_) => path.as_ref(),
None => args.config.as_ref(),
})
}
Some(Command::InitDatabase { url }) => return init_database(url),
None => (),
if let Some(Command::Init) = &args.command {
return init_config(args.config.as_ref());
}
let config = Config::read(args.config.unwrap_or_else(|| PathBuf::from("filite.json")))?;
logger::init(&config.logger)?;
let config = config::read(args.config.unwrap_or_else(|| PathBuf::from("filite.json")))?;
tracing_subscriber::fmt()
.with_env_filter(&config.log_level)
.with_span_events(FmtSpan::CLOSE)
.init();
let mut runtime = runtime::build(&config)?;
runtime.block_on(run(config))?;
@ -67,17 +53,13 @@ fn main() -> Result<(), Error> {
Ok(())
}
async fn run(config: Config) -> Result<(), Error> {
let _pool = Pool::build(&config).await?;
async fn run(config: &'static Config) -> Result<(), Error> {
let _pool = db::pool::build(&config).await?;
Ok(())
}
fn init_config(path: Option<&PathBuf>) -> Result<(), Error> {
config::Config::write(path.unwrap_or(&PathBuf::from("filite.json")))?;
config::write(path.unwrap_or(&PathBuf::from("filite.json")))?;
println!("Default config written");
Ok(())
}
fn init_database(_url: &str) -> Result<(), Error> {
Ok(())
}

View File

@ -1,8 +1,8 @@
use crate::config::Config;
use anyhow::Error;
use log::debug;
use tokio::runtime::{Builder, Runtime};
#[tracing::instrument(level = "debug")]
#[cfg_attr(not(feature = "threaded"), allow(unused_variables))]
pub fn build(config: &Config) -> Result<Runtime, Error> {
let mut builder = Builder::new();
@ -21,6 +21,5 @@ pub fn build(config: &Config) -> Result<Runtime, Error> {
}
}
debug!("Building Tokio runtime");
Ok(builder.build()?)
}

8
src/utils.rs Normal file
View File

@ -0,0 +1,8 @@
/// Extension trait for testing whether a value equals its type's default.
///
/// Used by serde `skip_serializing_if` attributes to omit config sections
/// that carry no information.
pub trait DefaultExt {
    /// Returns `true` if `self` compares equal to `Default::default()`.
    fn is_default(&self) -> bool;
}

/// Blanket implementation: any comparable type with a default qualifies.
impl<T: Default + PartialEq> DefaultExt for T {
    fn is_default(&self) -> bool {
        *self == T::default()
    }
}
}