diff --git a/Cargo.toml b/Cargo.toml
index 7dc985c..c475b75 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,8 +1,8 @@
 [package]
 name = "lust"
-version = "1.3.5"
+version = "2.0.0"
 authors = ["Harrison Burt <57491488+ChillFish8@users.noreply.github.com>"]
-edition = "2018"
+edition = "2021"
 documentation = "getting-started.md"
 readme = "README.md"
 license = "MIT"
@@ -14,36 +14,18 @@ description = "A fast, auto-optimising image server designed for multiple backen
 
 [dependencies]
 webp = { version = "*", path = "./webp" }
-image = "0.23"
+image = "0.24.1"
 base64 = "0.13.0"
 bytes = "1"
 anyhow = "1"
-clap = "2"
+clap = "3"
 serde_json = "1"
-serde_variant = "0.1.0"
-async-trait = "0.1.50"
-once_cell = "1.7.2"
-concread = "0.2.14"
+async-trait = "0.1"
+once_cell = "1.10.0"
 futures = "0.3"
-log = "0.4.14"
-pretty_env_logger = "0.4.0"
-
-gotham = "0.6.0"
-gotham_derive = "0.6.0"
-headers = "0.3"
-
-tokio = { version = "1", features = ["full"] }
-serde = { version = "1", features = ["derive"] }
-chrono = { version = "0.4", features = ["serde"] }
-uuid = { version = "0.8.2", features = ["serde", "v4"] }
-hashbrown = { version = "0.11.2", features = ["serde"] }
-sqlx = { version = "0.5", features = [ "runtime-tokio-rustls", "mysql", "sqlite", "postgres", "chrono", "uuid" ] }
-redis = { version = "0.20", features = ["tokio-comp", "connection-manager"] }
-scylla = "0.2.1"
-
 [profile.release]
 lto = "fat"
 codegen-units = 1
diff --git a/Dockerfile b/Dockerfile
index fe9965d..ddefc03 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,8 +1,8 @@
 FROM rust:slim-buster as build
 
-WORKDIR /code
+WORKDIR /app
 
-COPY . /code
+COPY . /app
 
 RUN cargo build --release
 
@@ -10,7 +10,7 @@ RUN cargo build --release
 FROM debian:buster-slim
 
 WORKDIR /etc/lust
-COPY --from=build /code/target/release/lust /
+COPY --from=build /app/target/release/lust /
 
 USER root
 ENTRYPOINT ["./lust", "run"]
diff --git a/rustfmt.toml b/rustfmt.toml
index 2b8802b..2cf8f73 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -3,4 +3,5 @@ combine_control_expr = false
 imports_layout = "HorizontalVertical"
 match_block_trailing_comma = true
 imports_granularity = "Module"
-group_imports = "StdExternalCrate"
\ No newline at end of file
+group_imports = "StdExternalCrate"
+max_width = 89
\ No newline at end of file
diff --git a/src/backends/cql.rs b/src/backends/cql.rs
deleted file mode 100644
index 4700667..0000000
--- a/src/backends/cql.rs
+++ /dev/null
@@ -1,379 +0,0 @@
-use anyhow::Result;
-use async_trait::async_trait;
-use bytes::{Bytes, BytesMut};
-use chrono::{DateTime, NaiveDateTime, Utc};
-use hashbrown::HashMap;
-use log::{debug, info, warn};
-use scylla::query::Query;
-use scylla::statement::prepared_statement::PreparedStatement;
-use scylla::transport::session::Session;
-use scylla::{QueryResult, SessionBuilder};
-use serde::{Deserialize, Serialize};
-use serde_variant::to_variant_name;
-use uuid::Uuid;
-
-use crate::configure::PAGE_SIZE;
-use crate::context::{FilterType, IndexResult, OrderBy};
-use crate::image::{ImageFormat, ImagePresetsData};
-use crate::traits::{DatabaseLinker, ImageStore};
-
-/// Represents a connection pool session with a round robbin load balancer.
-type CurrentSession = Session; - -type PagedRow = (Uuid, String, i64, i32); - -#[derive(Clone, Serialize, Deserialize)] -#[serde(tag = "strategy", content = "spec")] -enum ReplicationClass { - SimpleStrategy(SimpleNode), - NetworkTopologyStrategy(Vec), -} - -#[derive(Clone, Serialize, Deserialize)] -struct SimpleNode { - replication_factor: usize, -} - -#[derive(Clone, Serialize, Deserialize)] -struct DataCenterNode { - node_name: String, - replication: usize, -} - -/// The configuration for a cassandra database. -/// -/// Each cluster should be given in the `host:port` format and -/// should only be the main node (not replication nodes). -/// -/// The replication_factor is used when the keyspace is first created, -/// if the keyspace already exists this number may be ignored despite -/// being changed due to current implementation limitations. -/// -/// The replication_class is used when the keyspace is first created, -/// this has the same caveats as the replication_factor. -#[derive(Clone, Deserialize)] -pub struct DatabaseConfig { - clusters: Vec, - keyspace: ReplicationClass, - user: String, - password: String, -} - -macro_rules! log_and_convert_error { - ( $e:expr ) => {{ - match $e { - Ok(frame) => Some(frame), - Err(e) => { - warn!("failed to execute query {:?}", e); - None - }, - } - }}; -} - -async fn get_page( - filter: &FilterType, - session: &CurrentSession, - stmt: &PreparedStatement, - page_state: Option, -) -> Result { - Ok(match &filter { - FilterType::All => session.execute_paged(stmt, &[], page_state).await?, - FilterType::CreationDate(v) => { - session - .execute_paged(stmt, (v.to_string(),), page_state) - .await? - }, - FilterType::Category(v) => session.execute_paged(stmt, (v,), page_state).await?, - }) -} - -/// A cassandra database backend. -pub struct Backend { - session: CurrentSession, - check_cat: Option, - get_file: HashMap>, -} - -impl Backend { - pub async fn connect(cfg: DatabaseConfig) -> Result { - info!("connecting to database"); - let session = SessionBuilder::new() - .user(cfg.user, cfg.password) - .known_nodes(cfg.clusters.as_ref()) - .build() - .await?; - info!("connect successful"); - - let replication = match cfg.keyspace { - ReplicationClass::SimpleStrategy(node) => { - format!( - "'class': 'SimpleStrategy', 'replication_factor': {}", - node.replication_factor, - ) - }, - ReplicationClass::NetworkTopologyStrategy(mut nodes) => { - let mut spec = nodes - .drain(..) 
- .map(|v| format!("'{}': {}", v.node_name, v.replication)) - .collect::>(); - - spec.insert(0, "'class' : 'NetworkTopologyStrategy'".to_string()); - - spec.join(", ") - }, - }; - - let create_ks = format!( - "CREATE KEYSPACE IF NOT EXISTS lust_ks WITH REPLICATION = {{{}}};", - replication - ); - debug!("creating keyspace {}", &create_ks); - - let _ = session.query(create_ks, &[]).await?; - info!("keyspace ensured"); - - Ok(Self { - session, - check_cat: None, - get_file: HashMap::new(), - }) - } -} - -#[async_trait] -impl DatabaseLinker for Backend { - async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec) -> Result<()> { - info!("building tables"); - - let query = r#" - CREATE TABLE IF NOT EXISTS lust_ks.image_metadata ( - file_id UUID, - category TEXT, - insert_date TIMESTAMP, - total_size BIGINT, - PRIMARY KEY ((file_id), category) - ) WITH CLUSTERING ORDER BY (category DESC); - "#; - - self.session.query(query, &[]).await?; - info!("metadata table created successfully"); - - let query = r#" - CREATE INDEX IF NOT EXISTS ON lust_ks.image_metadata (category); - "#; - - self.session.query(query, &[]).await?; - info!("metadata table index created successfully"); - - let mut columns = vec![format!("file_id UUID PRIMARY KEY")]; - - for format in formats.iter() { - let column = to_variant_name(format).expect("unreachable"); - columns.push(format!("{} BLOB", column)) - } - - for preset in presets { - let query = format!( - "CREATE TABLE IF NOT EXISTS lust_ks.{table} ({columns})", - table = preset, - columns = columns.join(", ") - ); - - self.session.query(query, &[]).await?; - debug!("created preset table {}", preset); - - for format in formats.iter() { - let column = to_variant_name(format).expect("unreachable"); - - let qry = format!( - "SELECT {column} FROM lust_ks.{table} WHERE file_id = ? LIMIT 1;", - column = column, - table = preset, - ); - - let prepared = self.session.prepare(qry).await?; - debug!("prepared check query {:?}", format); - - if let Some(tbl) = self.get_file.get_mut(preset) { - tbl.insert(column.to_string(), prepared); - } else { - let mut new_map = HashMap::new(); - new_map.insert(column.to_string(), prepared); - self.get_file.insert(preset.to_string(), new_map); - } - } - } - info!("tables created"); - - let qry = r#" - SELECT file_id FROM lust_ks.image_metadata - WHERE file_id = ? 
AND category = ?; - "#; - let prepared = self.session.prepare(qry).await?; - self.check_cat = Some(prepared); - - info!("prepared all queries and tables"); - - Ok(()) - } -} - -#[async_trait] -impl ImageStore for Backend { - async fn get_image( - &self, - file_id: Uuid, - preset: String, - category: &str, - format: ImageFormat, - ) -> Option { - let prepared = self.check_cat.as_ref().unwrap(); - let query_result = - log_and_convert_error!(self.session.execute(prepared, (file_id, category)).await)?; - - let _ = query_result.rows?; - - let column = to_variant_name(&format).expect("unreachable"); - let prepared = self.get_file.get(&preset)?.get(column)?; - - let query_result = - log_and_convert_error!(self.session.execute(prepared, (file_id,)).await)?; - - let mut rows = query_result.rows?; - let row = rows.pop()?; - let (data,) = log_and_convert_error!(row.into_typed::<(Vec,)>())?; - let ref_: &[u8] = data.as_ref(); - Some(BytesMut::from(ref_)) - } - - async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> { - let mut total: i64 = 0; - for (preset, preset_data) in data { - let sum: i64 = preset_data.values().map(|v| v.len() as i64).sum(); - total += sum; - - let columns: String = preset_data - .keys() - .map(|v| to_variant_name(v).expect("unreachable")) - .collect::>() - .join(", "); - - let placeholders: String = (0..preset_data.len()) - .map(|_| "?") - .collect::>() - .join(", "); - - let mut values: Vec> = preset_data.values().map(|v| v.to_vec()).collect(); - - values.insert(0, file_id.as_bytes().to_vec()); - - let qry = format!( - "INSERT INTO lust_ks.{table} (file_id, {columns}) VALUES (?, {placeholders});", - table = preset, - columns = columns, - placeholders = placeholders, - ); - - let prepared = self.session.prepare(qry).await?; - self.session.execute(&prepared, values).await?; - } - - let qry = r#" - INSERT INTO lust_ks.image_metadata ( - file_id, - category, - insert_date, - total_size - ) VALUES (?, ?, ?, ?);"#; - - let now = Utc::now(); - - self.session - .query(qry, (file_id, category, now.timestamp(), total)) - .await?; - Ok(()) - } - - async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> { - for preset in presets { - let qry = format!( - "DELETE FROM lust_ks.{table} WHERE file_id = ?;", - table = preset, - ); - - self.session - .query(qry, (file_id.as_bytes().to_vec(),)) - .await?; - } - - let qry = "DELETE FROM lust_ks.image_metadata WHERE file_id = ?;"; - - self.session.query(qry, (file_id,)).await?; - Ok(()) - } - - async fn list_entities( - &self, - filter: FilterType, - _order: OrderBy, - page: usize, - ) -> Result> { - let qry = format!( - r#" - SELECT file_id, category, insert_date, total_size - FROM lust_ks.image_metadata - "#, - ); - - let mut query = match &filter { - FilterType::All => { - let qry = format!("{};", qry); - Query::new(qry) - }, - FilterType::CreationDate(_) => { - let qry = format!("{} WHERE insert_date = ?;", qry); - Query::new(qry) - }, - FilterType::Category(_) => { - let qry = format!("{} WHERE category = ?;", qry); - Query::new(qry) - }, - }; - - query.set_page_size(PAGE_SIZE as i32); - let prepared = self.session.prepare(query).await?; - let mut page_state = None; - - for _ in 0..page - 1 { - let rows = get_page(&filter, &self.session, &prepared, page_state.clone()).await?; - - page_state = rows.paging_state; - } - - let target_rows = get_page(&filter, &self.session, &prepared, page_state.clone()).await?; - - let results = if let Some(mut rows) = target_rows.rows { - 
rows.drain(..) - .map(|r| { - let r = r - .into_typed::() - .expect("database format invalidated"); - - let res = IndexResult { - file_id: r.0, - category: r.1, - created_on: DateTime::from_utc(NaiveDateTime::from_timestamp(r.2, 0), Utc), - total_size: r.3, - }; - - res - }) - .collect() - } else { - vec![] - }; - - Ok(results) - } -} diff --git a/src/backends/mod.rs b/src/backends/mod.rs deleted file mode 100644 index 836c1bd..0000000 --- a/src/backends/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod cql; -pub mod redis; -pub mod sql; diff --git a/src/backends/redis.rs b/src/backends/redis.rs deleted file mode 100644 index 81e313b..0000000 --- a/src/backends/redis.rs +++ /dev/null @@ -1,148 +0,0 @@ -use std::sync::atomic::{AtomicUsize, Ordering}; - -use anyhow::Result; -use async_trait::async_trait; -use bytes::BytesMut; -use log::error; -use redis::aio::ConnectionManager; -use redis::{AsyncCommands, AsyncIter}; -use serde::{Deserialize, Serialize}; -use uuid::Uuid; - -use crate::context::{FilterType, IndexResult, OrderBy}; -use crate::image::{ImageFormat, ImagePresetsData}; -use crate::traits::{DatabaseLinker, ImageStore}; - -#[derive(Clone, Serialize, Deserialize)] -pub struct RedisConfig { - connection_uri: String, - pool_size: usize, -} - -struct RedisPool { - connections: Vec, - index: AtomicUsize, -} - -impl RedisPool { - pub async fn connect(cfg: RedisConfig) -> Result { - let client = redis::Client::open(cfg.connection_uri)?; - let mut conns = Vec::new(); - for _ in 0..cfg.pool_size { - let conn = client.get_tokio_connection_manager().await?; - conns.push(conn); - } - - Ok(Self { - connections: conns, - index: AtomicUsize::new(0), - }) - } - - pub fn get(&self) -> ConnectionManager { - let index = self.index.load(Ordering::Relaxed); - let conn = self.connections[index].clone(); - - if index == (self.connections.len() - 1) { - self.index.store(0, Ordering::Relaxed); - } else { - self.index.store(index + 1, Ordering::Relaxed); - } - - conn - } -} - -pub struct Backend { - pool: RedisPool, -} - -impl Backend { - pub async fn connect(cfg: RedisConfig) -> Result { - let pool = RedisPool::connect(cfg).await?; - - Ok(Self { pool }) - } -} - -#[async_trait] -impl DatabaseLinker for Backend { - /// Due to the nature of the key-value setup for redis clients this has completely - /// different handling so does not do anything when this funciton is called. 
- async fn ensure_tables( - &mut self, - _presets: Vec<&str>, - _columns: Vec, - ) -> Result<()> { - Ok(()) - } -} - -#[async_trait] -impl ImageStore for Backend { - async fn get_image( - &self, - file_id: Uuid, - preset: String, - category: &str, - format: ImageFormat, - ) -> Option { - let key = format!("{:?} {} {} {:?}", file_id, preset, category, format); - let mut conn = self.pool.get(); - let result = conn.get(&key).await; - - let val: Vec = match result { - Ok(v) => v, - Err(e) => { - error!("failed to fetch key {} from redis: {:?}", &key, e); - return None; - }, - }; - - if val.len() == 0 { - None - } else { - let ref_: &[u8] = val.as_ref(); - Some(BytesMut::from(ref_)) - } - } - - async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> { - let mut pairs = Vec::new(); - - for (preset, formats) in data { - for (format, buff) in formats { - let key = format!("{:?} {} {} {:?}", &file_id, &preset, category, format); - pairs.push((key, buff.to_vec())); - } - } - - let mut conn = self.pool.get(); - conn.set_multiple(&pairs).await?; - - Ok(()) - } - - async fn remove_image(&self, file_id: Uuid, _presets: Vec<&String>) -> Result<()> { - let mut conn = self.pool.get(); - let mut conn2 = self.pool.get(); - let mut keys: AsyncIter = conn.scan_match(format!("{:?}*", file_id)).await?; - while let Some(v) = keys.next_item().await { - conn2.del(v).await?; - } - - Ok(()) - } - - /// This is non-functional due to limitations with the key-value setup of redis. - async fn list_entities( - &self, - _filter: FilterType, - _order: OrderBy, - _page: usize, - ) -> Result> { - Err(anyhow::Error::msg( - "redis backend does not support listing entities", - )) - } -} diff --git a/src/backends/sql.rs b/src/backends/sql.rs deleted file mode 100644 index 5af7f26..0000000 --- a/src/backends/sql.rs +++ /dev/null @@ -1,676 +0,0 @@ -use std::str::FromStr; - -use anyhow::Result; -use async_trait::async_trait; -use bytes::BytesMut; -use chrono::Utc; -use log::{debug, error, info}; -use serde::Deserialize; -use serde_variant::to_variant_name; -use sqlx::mysql::{MySqlPool, MySqlPoolOptions}; -use sqlx::postgres::{PgPool, PgPoolOptions}; -use sqlx::sqlite::{SqlitePool, SqlitePoolOptions}; -use sqlx::Row; -use uuid::Uuid; - -use crate::configure::PAGE_SIZE; -use crate::context::{FilterType, IndexResult, OrderBy}; -use crate::image::{ImageFormat, ImagePresetsData}; -use crate::traits::{DatabaseLinker, ImageStore}; - -/// The configuration for the SQL based database backends. -/// -/// The `connection_uri` should be formatted as a direct connect -/// uri. e.g. -/// `postgresql://john:boo@localhost/postgres` -/// -/// The `pool_size` determined the *maximum* amount of pool connections. 
-#[derive(Clone, Deserialize)] -pub struct DatabaseConfig { - connection_uri: String, - pool_size: u32, -} - -fn build_select_qry(column: &str, preset: &str, placeholder: &str) -> String { - format!( - "SELECT {column} FROM {table} WHERE file_id = {placeholder} LIMIT 1;", - column = column, - table = preset, - placeholder = placeholder, - ) -} - -fn build_insert_qry(preset: &str, columns: &Vec<&str>, placeholders: &Vec) -> String { - let columns = columns.join(", "); - let placeholders = placeholders.join(", "); - format!( - "INSERT INTO {table} ({columns}) VALUES ({placeholders});", - table = preset, - columns = columns, - placeholders = placeholders, - ) -} - -fn build_delete_queries(presets: &Vec<&String>, placeholder: &str) -> Vec { - let mut queries = vec![]; - for preset in presets { - queries.push(format!( - "DELETE FROM {table} WHERE file_id = {placeholder};", - table = preset, - placeholder = placeholder, - )) - } - - queries -} - -/// Either extracts the value as a `&[u8]` from the row as `Some(BytesMut)` -/// or becomes `None`. -macro_rules! extract_or_none { - ( $e:expr, $c:expr ) => {{ - match $e { - Ok(row) => { - let row = row?; - let data: &[u8] = row.get($c); - Some(BytesMut::from(data)) - }, - Err(e) => { - error!("failed to fetch row due to error: {:?}", e); - None - }, - } - }}; -} - -/// Builds a SQL query for the given preset (table) from -/// the given data adding place holders for each value for -/// prepared statements. -macro_rules! build_insert { - ( $preset:expr, $data:expr, $placeholder:expr ) => {{ - let mut columns: Vec<&str> = $data - .keys() - .map(|v| to_variant_name(v).expect("unreachable")) - .collect(); - columns.insert(0, "file_id"); - - let values: Vec = $data.values().map(|v| v.clone()).collect(); - - let placeholders: Vec = (1..columns.len() + 1).map($placeholder).collect(); - - (build_insert_qry($preset, &columns, &placeholders), values) - }}; -} - -/// Builds a sqlx query based on the given query string and values -/// -/// This also accounts for the file_id being a uuid vs everything else -/// being bytes. -macro_rules! query_with_parameters { - ( $id:expr, $qry:expr, $values:expr ) => {{ - let mut qry = sqlx::query($qry).bind($id); - - for value in $values { - qry = qry.bind(value) - } - - qry - }}; -} - -/// Deletes a file with a given id from all presets. -/// -/// Due to the nature of the Pool types but the similarity between -/// each database code to delete files it makes more sense to put this -/// in a macro over a function. -macro_rules! delete_file { - ( $id:expr, $presets:expr, $placeholder:expr, $pool:expr ) => {{ - let file_id = $id.to_string(); - let queries = build_delete_queries($presets, $placeholder); - - for qry in queries { - let query = sqlx::query(&qry).bind(&file_id); - query.execute($pool).await?; - } - - let qry = format!( - "DELETE FROM image_metadata WHERE file_id = {}", - $placeholder, - ); - - let query = sqlx::query(&qry).bind($id.to_string()); - query.execute($pool).await?; - }}; -} - -/// Inserts a given file_id into the index table. -/// -/// This table mostly acts as the metadata table for listing files of -/// given categories. -macro_rules! 
insert_metadata { - ( $file_id:expr, $category:expr, $total:expr, $placeholder:expr, $pool:expr, ) => {{ - let placeholders: String = (1..5).map($placeholder).collect::>().join(", "); - - let qry = format!( - r#" - INSERT INTO image_metadata ( - file_id, - category, - insert_date, - total_size - ) VALUES ({placeholders})"#, - placeholders = placeholders, - ); - - let now = Utc::now(); - - let query = sqlx::query(&qry) - .bind($file_id) - .bind($category) - .bind(now) - .bind($total); - query.execute($pool).await?; - }}; -} - -macro_rules! sum_total { - ( $total:expr, $values:expr ) => {{ - let sum: i64 = $values.values().map(|v| v.len() as i64).sum(); - $total += sum; - }}; -} - -macro_rules! check_category { - ( $file_id:expr, $category:expr, $ph1:expr, $ph2:expr, $pool:expr ) => {{ - let qry = format!( - "SELECT 1 FROM image_metadata WHERE file_id = {} AND category = {};", - $ph1, $ph2, - ); - - sqlx::query(&qry) - .bind($file_id.to_string()) - .bind($category) - .fetch_optional($pool) - .await - .unwrap_or(None) - }}; -} - -macro_rules! apply_filter { - ( $qry:expr, $placeholder:expr, $filter:expr ) => {{ - match $filter { - FilterType::All => (), - FilterType::Category(_) => $qry = format!("{} WHERE category = {}", $qry, $placeholder), - FilterType::CreationDate(_) => { - $qry = format!("{} WHERE insert_date = {}", $qry, $placeholder) - }, - }; - }}; -} - -macro_rules! bind_filter { - ( $query:expr, $filter:expr ) => {{ - match $filter { - FilterType::All => (), - FilterType::Category(v) => $query = $query.bind(v), - FilterType::CreationDate(v) => $query = $query.bind(v), - }; - }}; -} - -macro_rules! from_rows { - ( $rows:expr ) => {{ - $rows - .drain(..) - .map(|v| IndexResult { - file_id: Uuid::from_str(v.get("file_id")).expect("uuid was invalid in database"), - category: v.get("category"), - total_size: v.get("total_size"), - created_on: v.get("insert_date"), - }) - .collect() - }}; -} - -/// A database backend set to handle the PostgreSQL database. -pub struct PostgresBackend { - pool: PgPool, -} - -impl PostgresBackend { - /// Connect to the given PostgreSQL server. - /// - /// This will build a connection pool and connect with a maximum - /// of n connections determined by the `pool_size` of the given - /// config. 
- pub async fn connect(cfg: DatabaseConfig) -> Result { - let pool = PgPoolOptions::new() - .max_connections(cfg.pool_size) - .connect(&cfg.connection_uri) - .await?; - - Ok(Self { pool }) - } -} - -#[async_trait] -impl DatabaseLinker for PostgresBackend { - async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec) -> Result<()> { - info!("building tables"); - - let query = sqlx::query( - r#" - CREATE TABLE IF NOT EXISTS image_metadata ( - file_id CHAR(36) PRIMARY KEY, - category TEXT, - insert_date TIMESTAMP WITH TIME ZONE, - total_size INTEGER - )"#, - ); - - query.execute(&self.pool).await?; - - let mut columns = vec![format!("file_id CHAR(36) PRIMARY KEY")]; - - for format in formats { - let column = to_variant_name(&format).expect("unreachable"); - columns.push(format!("{} BYTEA", column)) - } - - for preset in presets { - let qry = format!( - "CREATE TABLE IF NOT EXISTS {table} ({columns})", - table = preset, - columns = columns.join(", ") - ); - - let query = sqlx::query(&qry); - - query.execute(&self.pool).await?; - } - - Ok(()) - } -} - -#[async_trait] -impl ImageStore for PostgresBackend { - async fn get_image( - &self, - file_id: Uuid, - preset: String, - category: &str, - format: ImageFormat, - ) -> Option { - check_category!(file_id, category, "$1", "$2", &self.pool)?; - - let column = to_variant_name(&format).expect("unreachable"); - - let qry = build_select_qry(column, &preset, "$1"); - let qry = sqlx::query(&qry).bind(file_id.to_string()); - - extract_or_none!(qry.fetch_optional(&self.pool).await, column) - } - - async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> { - let mut total: i64 = 0; - for (preset, preset_data) in data { - sum_total!(total, preset_data); - let (qry, values) = build_insert!(&preset, preset_data, |i| format!("${}", i)); - - let values_ = values.iter().map(|v| v.as_ref()); - let query = query_with_parameters!(file_id.to_string(), &qry, values_); - query.execute(&self.pool).await?; - } - - insert_metadata!( - file_id.to_string(), - category, - total, - |i| format!("${}", i), - &self.pool, - ); - - Ok(()) - } - - async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> { - delete_file!(file_id, &presets, "$1", &self.pool); - - Ok(()) - } - - async fn list_entities( - &self, - filter: FilterType, - order: OrderBy, - page: usize, - ) -> Result> { - // we start at 1 but the offset should be calculated from 0 - let skip = PAGE_SIZE * (page as i64 - 1); - let order = order.as_str(); - - let mut qry = format!( - r#" - SELECT file_id, category, insert_date, total_size - FROM image_metadata - ORDER BY {} DESC - OFFSET $1 - LIMIT $2 - "#, - order - ); - - apply_filter!(qry, "$3", &filter); - - let mut query = sqlx::query(&qry).bind(skip).bind(PAGE_SIZE); - - bind_filter!(query, filter); - - let mut rows = query.fetch_all(&self.pool).await?; - let results = from_rows!(rows); - - Ok(results) - } -} - -/// A database backend set to handle the MySQL / MariaDB database. -pub struct MySQLBackend { - pool: MySqlPool, -} - -impl MySQLBackend { - /// Connect to the given MySQL / MariaDB server. - /// - /// This will build a connection pool and connect with a maximum - /// of n connections determined by the `pool_size` of the given - /// config. 
- pub async fn connect(cfg: DatabaseConfig) -> Result { - let pool = MySqlPoolOptions::new() - .max_connections(cfg.pool_size) - .connect(&cfg.connection_uri) - .await?; - - Ok(Self { pool }) - } -} - -#[async_trait] -impl DatabaseLinker for MySQLBackend { - async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec) -> Result<()> { - info!("building tables"); - - let query = sqlx::query( - r#" - CREATE TABLE IF NOT EXISTS image_metadata ( - file_id CHAR(36) PRIMARY KEY, - category TEXT, - insert_date TIMESTAMP, - total_size INTEGER - )"#, - ); - - query.execute(&self.pool).await?; - - let mut columns = vec![format!("file_id CHAR(36) PRIMARY KEY")]; - - for format in formats { - let column = to_variant_name(&format).expect("unreachable"); - columns.push(format!("{} LONGBLOB", column)) - } - - for preset in presets { - let qry = format!( - "CREATE TABLE IF NOT EXISTS {table} ({columns})", - table = preset, - columns = columns.join(", ") - ); - - let query = sqlx::query(&qry); - - query.execute(&self.pool).await?; - } - - Ok(()) - } -} - -#[async_trait] -impl ImageStore for MySQLBackend { - async fn get_image( - &self, - file_id: Uuid, - preset: String, - category: &str, - format: ImageFormat, - ) -> Option { - check_category!(file_id, category, "?", "?", &self.pool)?; - - let column = to_variant_name(&format).expect("unreachable"); - - let qry = build_select_qry(column, &preset, "?"); - let query = sqlx::query(&qry).bind(file_id.to_string()); - - extract_or_none!(query.fetch_optional(&self.pool).await, column) - } - - async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> { - let mut total: i64 = 0; - for (preset, preset_data) in data { - sum_total!(total, preset_data); - let (qry, values) = build_insert!(&preset, preset_data, |_| "?".to_string()); - - let values_ = values.iter().map(|v| v.as_ref()); - let query = query_with_parameters!(file_id.to_string(), &qry, values_); - query.execute(&self.pool).await?; - } - - insert_metadata!( - file_id.to_string(), - category, - total, - |_| "?".to_string(), - &self.pool, - ); - - Ok(()) - } - - async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> { - delete_file!(file_id, &presets, "?", &self.pool); - Ok(()) - } - - async fn list_entities( - &self, - filter: FilterType, - order: OrderBy, - page: usize, - ) -> Result> { - // we start at 1 but the offset should be calculated from 0 - let skip = PAGE_SIZE * (page as i64 - 1); - let order = order.as_str(); - - let mut qry = format!( - r#" - SELECT file_id, category, insert_date, total_size - FROM image_metadata - ORDER BY {} DESC - LIMIT ?, ? - "#, - order - ); - - apply_filter!(qry, "?", &filter); - - let mut query = sqlx::query(&qry).bind(skip).bind(PAGE_SIZE); - - bind_filter!(query, filter); - - let mut rows = query.fetch_all(&self.pool).await?; - let results = from_rows!(rows); - - Ok(results) - } -} - -/// A database backend set to handle the Sqlite database. -/// -/// Due to the nature of SQLite this is *not* recommended for use -/// in production being a single file. Consider using something like -/// PostgreSQL or Cassandra in production. -/// -/// This backend requires that the system uses a standard File approach e.g. -/// not im memory / shared memory due to the sqlx::Pool handling. -/// If in-memory is used this can produce undefined behaviour in terms -/// of what data is perceived to be stored. -pub struct SqliteBackend { - pool: SqlitePool, -} - -impl SqliteBackend { - /// Connect to the given Sqlite file. 
- /// - /// This will build a connection pool and connect with a maximum - /// of n connections determined by the `pool_size` of the given - /// config. - /// - /// Due to the nature of this being a pool setup, in-memory setups are - /// not supported. - pub async fn connect(cfg: DatabaseConfig) -> Result { - let pool = SqlitePoolOptions::new() - .max_connections(cfg.pool_size) - .connect(&cfg.connection_uri) - .await?; - - info!("successfully connected to sqlite"); - - Ok(Self { pool }) - } -} - -#[async_trait] -impl DatabaseLinker for SqliteBackend { - async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec) -> Result<()> { - info!("building tables"); - - let query = sqlx::query( - " - CREATE TABLE IF NOT EXISTS image_metadata ( - file_id CHAR(36) PRIMARY KEY, - category TEXT, - insert_date TEXT, - total_size INTEGER - )", - ); - - query.execute(&self.pool).await?; - info!("metadata table created successfully"); - - let mut columns = vec![format!("file_id CHAR(36) PRIMARY KEY")]; - - for format in formats { - let column = to_variant_name(&format).expect("unreachable"); - columns.push(format!("{} BLOB", column)) - } - - for preset in presets { - let qry = format!( - "CREATE TABLE IF NOT EXISTS {table} ({columns})", - table = preset, - columns = columns.join(", ") - ); - - let query = sqlx::query(&qry); - - query.execute(&self.pool).await?; - - debug!("created preset table {}", preset); - } - info!("all preset tables created successfully"); - - Ok(()) - } -} - -#[async_trait] -impl ImageStore for SqliteBackend { - async fn get_image( - &self, - file_id: Uuid, - preset: String, - category: &str, - format: ImageFormat, - ) -> Option { - check_category!(file_id, category, "?", "?", &self.pool)?; - - let column = to_variant_name(&format).expect("unreachable"); - - let qry = build_select_qry(column, &preset, "?"); - let query = sqlx::query(&qry).bind(file_id.to_string()); - - extract_or_none!(query.fetch_optional(&self.pool).await, column) - } - - async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> { - let mut total: i64 = 0; - for (preset, preset_data) in data { - sum_total!(total, preset_data); - - let (qry, values) = build_insert!(&preset, preset_data, |_| "?".to_string()); - - let values_ = values.iter().map(|v| v.as_ref()); - let query = query_with_parameters!(file_id.to_string(), &qry, values_); - query.execute(&self.pool).await?; - } - - insert_metadata!( - file_id.to_string(), - category, - total, - |_| "?".to_string(), - &self.pool, - ); - - Ok(()) - } - - async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> { - delete_file!(file_id, &presets, "?", &self.pool); - Ok(()) - } - - async fn list_entities( - &self, - filter: FilterType, - order: OrderBy, - page: usize, - ) -> Result> { - // we start at 1 but the offset should be calculated from 0 - let skip = PAGE_SIZE * (page as i64 - 1); - let order = match order { - OrderBy::CreationDate => "datetime(insert_date)", - OrderBy::TotalSize => "total_size", - }; - - let mut qry = format!( - r#" - SELECT file_id, category, insert_date, total_size - FROM image_metadata - ORDER BY {} DESC - LIMIT ?, ?; - "#, - order - ); - - apply_filter!(qry, "?", &filter); - - let mut query = sqlx::query(&qry).bind(skip).bind(PAGE_SIZE); - - bind_filter!(query, filter); - - let mut rows = query.fetch_all(&self.pool).await?; - let results = from_rows!(rows); - - Ok(results) - } -} diff --git a/src/cache.rs b/src/cache.rs deleted file mode 100644 index 82547df..0000000 --- 
a/src/cache.rs +++ /dev/null @@ -1,58 +0,0 @@ -use std::sync::Arc; - -use bytes::BytesMut; -use concread::arcache::{ARCache, ARCacheBuilder}; -use once_cell::sync::OnceCell; -use uuid::Uuid; - -use crate::image::ImageFormat; - -/// The key that acts as the hashed key. -pub type CacheKey = (Uuid, String, ImageFormat); - -/// Cheaply cloneable lock around a LRU cache. -pub type CacheStore = Arc>; - -pub static CACHE_STATE: OnceCell = OnceCell::new(); - -/// A wrapper around the `CacheStore` type letting it be put into Gotham's -/// shared state. -#[derive(Clone)] -pub struct CacheState(pub Option); - -impl CacheState { - /// Creates a new cache state instance with a given size. - pub fn init(cache_size: usize) { - let inst = if cache_size == 0 { - Self { 0: None } - } else { - let store = Arc::new(ARCacheBuilder::new() - .set_size(cache_size, 12) - .build() - .unwrap() - ); - Self { 0: Some(store) } - }; - - let _ = CACHE_STATE.set(inst); - } - - /// Get a item from the cache if it exists otherwise returns None. - pub fn get(&self, file_id: Uuid, preset: String, format: ImageFormat) -> Option { - let state = self.0.as_ref()?; - let ref_val = (file_id, preset, format); - let mut target = state.read(); - target.get(&ref_val).map(|v| v.clone()) - } - - /// Adds an item to the cache, if the cache size is already at it's limit - /// the least recently used (LRU) item is removed. - pub fn set(&self, file_id: Uuid, preset: String, format: ImageFormat, data: BytesMut) { - if let Some(state) = self.0.as_ref() { - let ref_val = (file_id, preset, format); - let mut target = state.write(); - target.insert(ref_val, data); - target.commit(); - } - } -} diff --git a/src/configure.rs b/src/configure.rs deleted file mode 100644 index 6afce37..0000000 --- a/src/configure.rs +++ /dev/null @@ -1,147 +0,0 @@ -use std::fs::read_to_string; -use std::sync::Arc; - -use gotham_derive::StateData; -use hashbrown::HashMap; -use serde::{Deserialize, Serialize}; - -use crate::image::ImageFormat; -use crate::storage::DatabaseBackend; - -/// The size of the pages when listing indexes via the admin panel. -pub const PAGE_SIZE: i64 = 50; - -/// A cheaply cloneable version of the given configuration -/// for shared state middleware. -#[derive(Clone, StateData)] -pub struct StateConfig(pub Arc); - -#[derive(Copy, Clone, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -pub enum LogLevel { - Off, - Info, - Debug, - Error, -} - -/// A given size of a preset. -/// Any uploaded images will be automatically duplicated and resized in this -/// preset. -#[derive(Deserialize)] -pub struct SizingPreset { - pub width: u32, - pub height: u32, -} - -#[derive(Deserialize)] -pub struct Config { - pub log_level: LogLevel, - pub host: String, - pub port: u16, - pub base_data_path: String, - pub formats: HashMap, - pub database_backend: DatabaseBackend, - pub size_presets: HashMap, - pub default_serving_preset: String, - pub default_serving_format: ImageFormat, - pub webp_quality: Option, - pub webp_compression: Option, - pub webp_method: Option, - pub webp_threading: Option, - pub cache_size: usize, -} - -impl Config { - pub fn from_file(file: &str) -> anyhow::Result { - let data = read_to_string(file)?; - Ok(serde_json::from_str::(&data)?) 
- } - - pub fn template(backend: &str) -> anyhow::Result { - let config = match backend.to_lowercase().as_str() { - "redis" => json!({ - "type": "redis", - "config": { - "connection_uri": "redis://user:pass@localhost/0", - "pool_size": 12, - } - }), - "cassandra" => json!({ - "type": "cassandra", - "config": { - "clusters": [ - "ip:port", - "ip:port", - "ip:port", - ], - "keyspace": { - "strategy": "SimpleStrategy", - "spec": { - "replication_factor": 3 - } - }, - "user": "", - "password": "", - } - }), - "postgres" => json!({ - "type": "postgres", - "config": { - "connection_uri": "postgres://user:pass@localhost/foo", - "pool_size": 10, - } - }), - "mysql" => json!({ - "type": "mysql", - "config": { - "connection_uri": "mysql://user:pass@localhost/foo", - "pool_size": 10, - } - }), - "sqlite" => json!({ - "type": "sqlite", - "config": { - "connection_uri": "sqlite://database.db", - "pool_size": 10, - } - }), - _ => return Err(anyhow::Error::msg("invalid database backend given")), - }; - - Ok(json!({ - "log_level": LogLevel::Info, - "host": "127.0.0.1", - "port": 7070, - "base_data_path": "/images", - "formats": { - "png": true, - "jpeg": true, - "gif": false, - "webp": true, - }, - "database_backend": config, - "size_presets": { - "small": { - "width": 32, - "height": 32, - }, - "medium": { - "width": 64, - "height": 64, - }, - "large": { - "width": 128, - "height": 128, - }, - }, - "default_serving_preset": "original", - "default_serving_format": "webp", - "webp_quality": None::, - "webp_compression": Some(50), - "webp_method": Some(4), - "webp_threading": Some(true), - "cache_size": 500, - })) - } -} diff --git a/src/context.rs b/src/context.rs deleted file mode 100644 index 71e1709..0000000 --- a/src/context.rs +++ /dev/null @@ -1,58 +0,0 @@ -use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize}; -use uuid::Uuid; - -/// A set of filters that can be used to view -/// entities via the REST API on the admin panel. -/// -/// Example: -/// -/// ```json -/// { -/// "filter": { -/// "filter_type": "category", -/// "with_value": "cats", -/// } -/// } -/// ``` -#[derive(Clone, Serialize, Deserialize)] -#[serde(rename_all = "lowercase", tag = "filter_type", content = "with_value")] -pub enum FilterType { - All, - Category(String), - CreationDate(DateTime), -} - -/// How the data should be ordered when requesting the -/// index list. -#[derive(Copy, Clone, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -pub enum OrderBy { - CreationDate, - TotalSize, -} - -impl OrderBy { - pub fn as_str(&self) -> &str { - match self { - OrderBy::CreationDate => "insert_date", - OrderBy::TotalSize => "total_size", - } - } -} - -/// A result when listing all items in the server. 
-#[derive(Serialize)] -pub struct IndexResult { - pub file_id: Uuid, - pub category: String, - pub total_size: i32, - pub created_on: DateTime, -} - -#[derive(Deserialize)] -pub struct FilesListPayload { - pub filter: Option, - pub order: Option, - pub page: Option, -} diff --git a/src/image.rs b/src/image.rs deleted file mode 100644 index 8e9ae0d..0000000 --- a/src/image.rs +++ /dev/null @@ -1,241 +0,0 @@ -use std::sync::Arc; -use std::time::Instant; - -use anyhow::Result; -use bytes::{BufMut, BytesMut}; -use gotham::state::{FromState, State}; -use gotham_derive::{StateData, StaticResponseExtender}; -use hashbrown::HashMap; -use image::{imageops, load_from_memory_with_format, DynamicImage}; -use log::{debug, error}; -use serde::{Deserialize, Serialize}; -use uuid::Uuid; -use webp::Encoder; - -use crate::configure::StateConfig; -use crate::storage::StorageBackend; -use crate::traits::ImageStore; - -pub type ImageData = HashMap; -pub type ImagePresetsData = HashMap; - -pub type ImageDataSizes = HashMap; -pub type ImagePresetDataSizes = HashMap; - -#[derive(Debug, Clone, Ord, PartialOrd, Hash, Eq, PartialEq, Serialize, Deserialize, Copy)] -#[serde(rename_all = "lowercase")] -pub enum ImageFormat { - Png, - Jpeg, - Gif, - WebP, -} - -#[derive(Deserialize, StateData, StaticResponseExtender)] -pub struct ImageGet { - pub format: Option, - pub encode: Option, - pub preset: Option, -} - -#[derive(Deserialize)] -pub struct ImageUpload { - pub format: ImageFormat, - pub data: String, - pub category: Option, -} - -#[derive(Serialize)] -pub struct ImageUploaded { - pub file_id: Uuid, - pub formats: ImagePresetDataSizes, - pub category: String, -} - -#[derive(Deserialize, StateData, StaticResponseExtender)] -pub struct ImageRemove { - pub file_id: Uuid, -} - -macro_rules! convert { - ( $e:expr, $d:expr ) => {{ - || -> anyhow::Result { - let buff = BytesMut::new(); - let mut writer = buff.writer(); - let start = Instant::now(); - $e.write_to(&mut writer, $d)?; - debug!("format {:?} conversion took {:?}", $d, start.elapsed()); - Ok(writer.into_inner()) - }() - }}; -} - -macro_rules! generate { - ( $n:expr, $e:expr, $hm1:expr, $hm2:expr, $cfg:expr ) => ({ - let (data, sizes) = convert_image($e, $cfg).await?; - $hm1.insert($n.to_string(), sizes); - $hm2.insert($n.to_string(), data); - }) -} - -macro_rules! is_enabled { - ( $format:expr, $options:expr ) => {{ - $options.get(&$format).map(|v| *v).unwrap_or(true) - }}; -} - -macro_rules! 
log_err { - ( $result:expr, $msg:expr ) => {{ - match &$result { - Ok(_) => (), - Err(e) => error!("{} {:?}", $msg, e), - }; - - $result - }}; -} - -fn spawn_conversion( - img: Arc, - format: ImageFormat, - convert_to_format: image::ImageFormat, -) -> Result<(ImageFormat, BytesMut)> { - let img: BytesMut = log_err!( - convert!(img, convert_to_format), - format!("failed to convert {:?}: ", convert_to_format) - )?; - - return Ok((format, img)); -} - -async fn convert_image( - img: Arc, - cfg: StateConfig, -) -> Result<(ImageData, ImageDataSizes)> { - let mut resulting_sizes = HashMap::with_capacity(4); - let mut resulting_data = HashMap::with_capacity(4); - - let mut handles = vec![]; - - if is_enabled!(ImageFormat::Png, cfg.0.formats) { - let cloned = img.clone(); - let handle = tokio::task::spawn_blocking(move || { - spawn_conversion(cloned, ImageFormat::Png, image::ImageFormat::Png) - }); - handles.push(handle); - } - - if is_enabled!(ImageFormat::Jpeg, cfg.0.formats) { - let cloned = img.clone(); - let handle = tokio::task::spawn_blocking(move || { - spawn_conversion(cloned, ImageFormat::Jpeg, image::ImageFormat::Jpeg) - }); - handles.push(handle); - } - - if is_enabled!(ImageFormat::Gif, cfg.0.formats) { - let cloned = img.clone(); - let handle = tokio::task::spawn_blocking(move || { - spawn_conversion(cloned, ImageFormat::Gif, image::ImageFormat::Gif) - }); - handles.push(handle); - } - - // This is the slowest conversion, maybe change?? - // Updated: New encoder allows for multi threading encoding. - if is_enabled!(ImageFormat::WebP, cfg.0.formats) { - let cloned = img.clone(); - let handle = tokio::task::spawn_blocking(move || -> Result<(ImageFormat, BytesMut)> { - let start = Instant::now(); - let raw = Encoder::from_image(cloned.as_ref()).encode(); - debug!( - "format {:?} conversion took {:?}", - image::ImageFormat::WebP, - start.elapsed() - ); - let webp = BytesMut::from(raw.as_ref()); - - Ok((ImageFormat::WebP, webp)) - }); - handles.push(handle); - } - - for handle in handles { - let (format, data) = handle.await??; - resulting_sizes.insert(format, data.len()); - resulting_data.insert(format, data); - } - - Ok((resulting_data, resulting_sizes)) -} - -pub async fn process_new_image( - state: &mut State, - category: &str, - format: ImageFormat, - data: Vec, -) -> Result<(Uuid, ImagePresetDataSizes)> { - let cfg = StateConfig::take_from(state); - let storage = StorageBackend::take_from(state); - - let fmt = match format { - ImageFormat::Png => image::ImageFormat::Png, - ImageFormat::Jpeg => image::ImageFormat::Jpeg, - ImageFormat::Gif => image::ImageFormat::Gif, - ImageFormat::WebP => image::ImageFormat::WebP, - }; - - let presets = &cfg.0.size_presets; - let mut converted_sizes = HashMap::with_capacity(presets.len()); - let mut converted_data = HashMap::with_capacity(presets.len()); - let original = Arc::from(log_err!( - load_from_memory_with_format(&data, fmt), - "failed to load format due to exception: " - )?); - generate!( - "original", - original.clone(), - converted_sizes, - converted_data, - cfg.clone() - ); - - for (preset_name, size) in presets { - let cloned = original.clone(); - let im = Arc::new(cloned.resize(size.width, size.height, imageops::FilterType::Nearest)); - - generate!( - preset_name, - im, - converted_sizes, - converted_data, - cfg.clone() - ); - } - - let file_id = Uuid::new_v4(); - storage.add_image(file_id, category, converted_data).await?; - - Ok((file_id, converted_sizes)) -} - -pub async fn get_image( - state: &mut State, - file_id: Uuid, - preset: 
String, - category: &str, - format: ImageFormat, -) -> Option { - let storage = StorageBackend::take_from(state); - storage.get_image(file_id, preset, category, format).await -} - -pub async fn delete_image(state: &mut State, file_id: Uuid) -> Result<()> { - let storage = StorageBackend::take_from(state); - let cfg = StateConfig::take_from(state); - - let presets = cfg.0.size_presets.keys().collect(); - storage.remove_image(file_id, presets).await?; - - Ok(()) -} diff --git a/src/main.rs b/src/main.rs index 9ffdae7..e69de29 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,250 +0,0 @@ -mod backends; -mod cache; -mod configure; -mod context; -mod image; -mod response; -mod routes; -mod storage; -mod traits; - -#[macro_use] -extern crate serde_json; - -use std::net::SocketAddr; -use std::sync::Arc; - -use anyhow::Result; -use clap::{App, Arg, ArgMatches, SubCommand}; -use gotham::middleware::logger::SimpleLogger as GothSimpleLogger; -use gotham::middleware::state::StateMiddleware; -use gotham::pipeline::new_pipeline; -use gotham::pipeline::single::single_pipeline; -use gotham::router::builder::{build_router, DefineSingleRoute, DrawRoutes}; -use gotham::router::Router; -use gotham_derive::{StateData, StaticResponseExtender}; -use log::{info, LevelFilter}; -use serde::Deserialize; -use tokio::fs; -use uuid::Uuid; - -use crate::configure::{LogLevel, StateConfig}; -use crate::image::{ImageFormat, ImageGet, ImageRemove}; -use crate::storage::{DatabaseBackend, StorageBackend}; -use crate::traits::DatabaseLinker; - -/// A regex string for validating uuids in the request path. -static UUID_REGEX: &str = - "[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$"; - -/// A regex for separating out the category sections -static CATEGORY_REGEX: &str = "[a-zA-Z0-9]+"; - -/// A simple extractor for taking the file_id out of the path -/// of the request as a UUID. -#[derive(Deserialize, StateData, StaticResponseExtender)] -struct PathExtractor { - category: Option, - file_id: Uuid, -} - -/// Constructs all the routes for the server. -fn router(backend: storage::StorageBackend, config: StateConfig) -> Result { - let base = config.0.base_data_path.clone(); - - let cache_size = config.0.cache_size; - cache::CacheState::init(cache_size); - - let pipeline = new_pipeline() - .add(GothSimpleLogger::new(log::Level::Info)) - .add(StateMiddleware::new(backend)) - .add(StateMiddleware::new(config)) - .build(); - let (chain, pipelines) = single_pipeline(pipeline); - - Ok(build_router(chain, pipelines, |route| { - route - .get(&format!("{}/:file_id:{}", base, UUID_REGEX)) - .with_path_extractor::() - .with_query_string_extractor::() - .to_async(routes::get_file); - - route - .get(&format!( - "{}/:category:{}/:file_id:{}", - base, CATEGORY_REGEX, UUID_REGEX - )) - .with_path_extractor::() - .with_query_string_extractor::() - .to_async(routes::get_file); - - route.post("admin/create/image").to_async(routes::add_file); - route - .delete(&format!("admin/delete/image/:file_id:{}", UUID_REGEX)) - .with_path_extractor::() - .to_async(routes::remove_file); - - route.post("admin/list").to_async(routes::list_files); - })) -} - -/// This will initialise the logger as well as -/// start server and parse args (although not in that order). 
-#[tokio::main] -async fn main() -> Result<()> { - let cli_args = parse_args(); - let (name, args) = cli_args.subcommand(); - match name { - "init" => run_init(args.unwrap()).await, - "run" => run_server(args.unwrap()).await, - other => { - return Err(anyhow::Error::msg(format!( - "command {} is not supported, only commands (init, run) are supported", - other, - ))) - }, - }?; - - Ok(()) -} - -async fn run_init(args: &ArgMatches<'_>) -> Result<()> { - let target_backend = args.value_of("backend").expect("backend value not given"); - - let example = configure::Config::template(target_backend)?; - let out = serde_json::to_string_pretty(&example)?; - fs::write("./config.json", out).await?; - - Ok(()) -} - -async fn run_server(args: &ArgMatches<'_>) -> Result<()> { - let cfg = if let Some(cfg) = args.value_of("config") { - configure::Config::from_file(cfg) - } else { - return Err(anyhow::Error::msg( - "missing required config file, exiting...", - )); - }?; - - let (goth_lvl, lust_lvl) = match cfg.log_level { - LogLevel::Off => (LevelFilter::Off, LevelFilter::Off), - LogLevel::Info => (LevelFilter::Info, LevelFilter::Info), - LogLevel::Debug => (LevelFilter::Info, LevelFilter::Debug), - LogLevel::Error => (LevelFilter::Error, LevelFilter::Error), - }; - - if std::env::var_os("RUST_LOG").is_none() { - std::env::set_var("RUST_LOG", format!("warn,lust={},gotham={}", lust_lvl, goth_lvl)); - } - - pretty_env_logger::init(); - - let lossless = cfg.webp_quality.is_none(); - let quality = if lossless { - cfg.webp_compression.unwrap_or(50f32) - } else { - cfg.webp_quality.unwrap() - }; - let threading = cfg.webp_threading.unwrap_or(true); - let method = cfg.webp_method.unwrap_or(4) as i32; - info!( - "setting up webp state. \ - Lossless: {}, \ - Quality: {}, \ - Method: {}, \ - Threading: {}", - lossless, quality, method, threading - ); - webp::init_global(lossless, quality, method, threading); - - let fields: Vec = cfg - .formats - .iter() - .filter_map( - |(format, enabled)| { - if *enabled { - Some(*format) - } else { - None - } - }, - ) - .collect(); - - let mut presets: Vec<&str> = cfg.size_presets.keys().map(|v| v.as_str()).collect(); - presets.push("original"); - - let backend: StorageBackend = match cfg.database_backend.clone() { - DatabaseBackend::Redis(db_cfg) => { - let mut db = backends::redis::Backend::connect(db_cfg).await?; - db.ensure_tables(presets, fields).await?; - let _ = storage::REDIS.set(db); - StorageBackend::Redis - }, - DatabaseBackend::Cassandra(db_cfg) => { - let mut db = backends::cql::Backend::connect(db_cfg).await?; - db.ensure_tables(presets, fields).await?; - let _ = storage::CASSANDRA.set(db); - StorageBackend::Cassandra - }, - DatabaseBackend::Postgres(db_cfg) => { - let mut db = backends::sql::PostgresBackend::connect(db_cfg).await?; - db.ensure_tables(presets, fields).await?; - let _ = storage::POSTGRES.set(db); - StorageBackend::Postgres - }, - DatabaseBackend::MySQL(db_cfg) => { - let mut db = backends::sql::MySQLBackend::connect(db_cfg).await?; - db.ensure_tables(presets, fields).await?; - let _ = storage::MYSQL.set(db); - StorageBackend::MySQL - }, - DatabaseBackend::Sqlite(db_cfg) => { - let mut db = backends::sql::SqliteBackend::connect(db_cfg).await?; - db.ensure_tables(presets, fields).await?; - let _ = storage::SQLITE.set(db); - StorageBackend::Sqlite - }, - }; - - let addr: SocketAddr = format!("{}:{}", &cfg.host, cfg.port).parse()?; - let state_cfg = StateConfig(Arc::new(cfg)); - let _ = gotham::init_server(addr, router(backend, state_cfg)?).await; - - 
Ok(()) -} - -fn parse_args() -> ArgMatches<'static> { - App::new("Lust") - .version("0.1.0") - .author("Harrison Burt ") - .about("A powerful automatic image server.") - .subcommand( - SubCommand::with_name("init") - .about("Initialises the workspace with a configuration file") - .version("0.1.0") - .arg( - Arg::with_name("backend") - .short("b") - .long("backend") - .help("The target database backend") - .takes_value(true) - .required(true), - ), - ) - .subcommand( - SubCommand::with_name("run") - .about("Runs the server with the given configuration") - .version("0.1.0") - .arg( - Arg::with_name("config") - .short("c") - .long("config") - .help("The path to a given config file in JSON format.") - .takes_value(true) - .default_value("config.json"), - ), - ) - .get_matches() -} diff --git a/src/response.rs b/src/response.rs deleted file mode 100644 index 76ce55a..0000000 --- a/src/response.rs +++ /dev/null @@ -1,54 +0,0 @@ -use bytes::BytesMut; -use gotham::hyper::http::{header, Response, StatusCode}; -use gotham::hyper::Body; -use headers::{ContentType, HeaderMapExt}; -use serde_json::Value; - -use crate::image::ImageFormat; - -/// A standard JSON response with the content type set to application/json -pub fn json_response(status: StatusCode, data: Option) -> Response { - let payload = json!({ - "status": status.as_u16(), - "data": data, - }); - - let mut resp = Response::builder() - .status(status) - .body(Body::from(serde_json::to_vec(&payload).unwrap())) - .unwrap(); - - resp.headers_mut().typed_insert(ContentType::json()); - - resp -} - -pub fn image_response(format: ImageFormat, data: BytesMut) -> Response { - let mut resp = Response::builder() - .status(StatusCode::OK) - .body(Body::from(data.to_vec())) - .unwrap(); - - let content_type = match format { - ImageFormat::Png => "image/png", - ImageFormat::Jpeg => "image/jpeg", - ImageFormat::Gif => "image/gif", - ImageFormat::WebP => "image/webp", - }; - - resp.headers_mut() - .insert(header::CONTENT_TYPE, content_type.parse().unwrap()); - - resp -} - -pub fn empty_response(status: StatusCode) -> Response { - let mut resp = Response::builder() - .status(status) - .body(Body::from(Vec::new())) - .unwrap(); - - resp.headers_mut().typed_insert(ContentType::text_utf8()); - - resp -} diff --git a/src/routes.rs b/src/routes.rs deleted file mode 100644 index 729f0e0..0000000 --- a/src/routes.rs +++ /dev/null @@ -1,290 +0,0 @@ -use base64::{decode, encode}; -use gotham::handler::HandlerResult; -use gotham::hyper::http::StatusCode; -use gotham::hyper::{body, Body}; -use gotham::state::{FromState, State}; -use log::{debug, error}; - -use crate::cache::CACHE_STATE; -use crate::configure::StateConfig; -use crate::context::{FilesListPayload, FilterType, OrderBy}; -use crate::image::{ - delete_image, - get_image, - process_new_image, - ImageGet, - ImageRemove, - ImageUpload, - ImageUploaded, -}; -use crate::response::{empty_response, image_response, json_response}; -use crate::storage::StorageBackend; -use crate::traits::ImageStore; -use crate::PathExtractor; - -macro_rules! 
from_body { - ( $e:expr ) => {{ - let res = body::to_bytes(Body::take_from(&mut $e)).await; - let bod = match res { - Ok(bod) => bod, - Err(e) => { - error!("failed to read data from body {:?}", &e); - return Ok(( - $e, - json_response( - StatusCode::INTERNAL_SERVER_ERROR, - Some(json!({ - "message": format!("encountered exception: {:?}", e) - })), - ), - )); - } - }; - - match serde_json::from_slice(bod.as_ref()) { - Ok(v) => v, - Err(e) => { - return Ok(( - $e, - json_response( - StatusCode::UNPROCESSABLE_ENTITY, - Some(json!({ - "message": - format!( - "failed to deserialize POST body due to the following error: {:?}", - e - ) - })), - ), - )) - } - } - }}; -} - -/// Gets a given image from the storage backend with the given -/// preset and format if it does not already exist in cache. -/// -/// This endpoint can return any of the following status codes: -/// -/// 404: -/// The image does not exist, NOTE: This endpoint will **always** -/// return a 404 if an unexpected error was encountered rather than -/// raising an error to the requester, instead it will be logged in -/// the console. -/// -/// 200: -/// The image was successfully fetched and sent as the response. -/// -/// TODO: -/// Likely performance issues could become apparent at higher -/// concurrency due to the Mutex on the LRU cache, although this -/// is probably insignificant compared to the time spent on IO. -pub async fn get_file(mut state: State) -> HandlerResult { - let path_vars = PathExtractor::take_from(&mut state); - let params = ImageGet::take_from(&mut state); - let config = StateConfig::take_from(&mut state); - - let file_id = path_vars.file_id; - let category = path_vars.category.unwrap_or_else(|| "default".to_string()); - - let format = params - .format - .unwrap_or_else(|| config.0.default_serving_format.clone()); - - let mut preset = params - .preset - .unwrap_or_else(|| config.0.default_serving_preset.clone()); - - if preset != "original" { - // We dont want to necessarily error if you give an invalid - // preset, but we dont want to attempt something that doesnt - // exist. - if !config.0.size_presets.contains_key(&preset) { - preset = "original".into(); - } - } - - let cache = CACHE_STATE.get().expect("not initialised"); - let img = if let Some(cached) = cache.get(file_id, preset.clone(), format) { - debug!( - "using cached version of image for file_id: {}, preset: {}, format: {:?}", - file_id, &preset, format, - ); - Some(cached) - } else { - debug!( - "using backend version of image for file_id: {}, preset: {}, format: {:?}", - file_id, &preset, format, - ); - if let Some(data) = get_image(&mut state, file_id, preset.clone(), &category, format).await - { - cache.set(file_id, preset, format, data.clone()); - Some(data) - } else { - None - } - }; - - match img { - None => Ok((state, empty_response(StatusCode::NOT_FOUND))), - Some(data) => { - if params.encode.unwrap_or(false) { - let encoded = encode(data.as_ref()); - return Ok(( - state, - json_response( - StatusCode::OK, - Some(json!({ - "image": encoded, - })), - ), - )); - } - Ok((state, image_response(format, data))) - }, - } -} - -/// Handles a POST request for adding a image to the store. -/// -/// The image payload must be in JSON format and be base64 encoded in -/// the standard specification. -/// -/// E.g. -/// ```json -/// { -/// "format": "png", -/// "data": "...data ensues..." 
-/// } -/// ``` -pub async fn add_file(mut state: State) -> HandlerResult { - let upload: ImageUpload = from_body!(state); - - let format = upload.format; - let data = match decode(upload.data) { - Ok(d) => d, - Err(_) => { - return Ok(( - state, - json_response( - StatusCode::UNPROCESSABLE_ENTITY, - Some(json!({ - "message": "data is not encoded in base64 format correctly", - })), - ), - )) - }, - }; - - let category = upload.category.unwrap_or_else(|| "default".to_string()); - - let (file_id, formats) = match process_new_image(&mut state, &category, format, data).await { - Ok(v) => v, - Err(e) => { - return Ok(( - state, - json_response( - StatusCode::INTERNAL_SERVER_ERROR, - Some(json!({ - "message": format!("failed to process image: {:?}", e), - })), - ), - )); - }, - }; - - let resp = ImageUploaded { - file_id, - formats, - category, - }; - - let resp = serde_json::to_value(resp).expect("failed to serialize uploaded stats"); - - Ok((state, json_response(StatusCode::OK, Some(resp)))) -} - -/// Handles removing a image from the store. -/// -/// This removes the image from both the database backend and -/// the cache if it exists in there. -/// -/// This only requires the UUID of the image no other information -/// is needed. -/// -/// Note on semantics: -/// This endpoint does not check if the image exists or not, -/// it simply tries to remove it if it exists otherwise ignores it. -/// -/// For that reason this will always return 200 if no exceptions -/// happened at the time. -/// -/// This endpoint can return any of the following responses: -/// -/// 500: -/// The server could not complete the request due to a unexpected -/// exception, this is typically only possible via the transaction -/// on the database backend failing. -/// -/// 200: -/// The image has been removed successfully. 
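The deleted `add_file` handler above expects the `data` field of the JSON body to be standard base64 and rejects the request with a 422 when it is not. A minimal sketch of just that decode step, assuming the `base64 = "0.13"` crate from the old Cargo.toml; the Gotham state plumbing and the `process_new_image` call are omitted:

```rust
use base64::{decode, encode}; // base64 = "0.13"

/// Mirror of the validation the deleted handler performed before handing
/// bytes on to `process_new_image`.
fn decode_upload(data: &str) -> Result<Vec<u8>, String> {
    decode(data).map_err(|_| "data is not encoded in base64 format correctly".to_string())
}

fn main() {
    // Round-trip a tiny fake payload: encode on the client, decode on the server.
    let raw = b"\x89PNG\r\n\x1a\n"; // first bytes of a PNG header
    let wire = encode(raw);
    let back = decode_upload(&wire).expect("valid base64");
    assert_eq!(back, raw.to_vec());

    // A malformed payload produces the same error the endpoint reported.
    assert!(decode_upload("not//valid!!").is_err());
}
```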
-pub async fn remove_file(mut state: State) -> HandlerResult { - let params = ImageRemove::take_from(&mut state); - - if let Err(e) = delete_image(&mut state, params.file_id).await { - return Ok(( - state, - json_response( - StatusCode::INTERNAL_SERVER_ERROR, - Some(json!({ - "message": format!( - "failed to delete image with id: {} due to the following exception: {:?}", - params.file_id, - e - ) - })), - ), - )); - }; - - Ok(( - state, - json_response( - StatusCode::OK, - Some(json!({ - "message": "file deleted if exists", - "file_id": params.file_id.to_string() - })), - ), - )) -} - -pub async fn list_files(mut state: State) -> HandlerResult { - let payload: FilesListPayload = from_body!(state); - let storage = StorageBackend::take_from(&mut state); - - let filter = payload.filter.unwrap_or_else(|| FilterType::All); - let sort = payload.order.unwrap_or_else(|| OrderBy::CreationDate); - let page = payload.page.unwrap_or_else(|| 1usize); - - let (status, payload) = match storage.list_entities(filter.clone(), sort, page).await { - Ok(results) => ( - StatusCode::OK, - Some(json!({ - "page": page, - "filtered_by": filter, - "ordered_by": sort, - "results": results, - })), - ), - Err(e) => ( - StatusCode::INTERNAL_SERVER_ERROR, - Some(json!({ - "message": format!("failed to fetch results for page due to error: {:?}", e) - })), - ), - }; - - Ok((state, json_response(status, payload))) -} diff --git a/src/storage.rs b/src/storage.rs deleted file mode 100644 index f6b29fa..0000000 --- a/src/storage.rs +++ /dev/null @@ -1,137 +0,0 @@ -use anyhow::Result; -use async_trait::async_trait; -use bytes::BytesMut; -use gotham_derive::StateData; -use log::error; -use once_cell::sync::OnceCell; -use serde::Deserialize; -use uuid::Uuid; - -use crate::backends; -use crate::context::{FilterType, IndexResult, OrderBy}; -use crate::image::{ImageFormat, ImagePresetsData}; -use crate::traits::ImageStore; - -// The bellow definitions are a hack, this is due to -pub(crate) static REDIS: OnceCell = OnceCell::new(); -pub(crate) static CASSANDRA: OnceCell = OnceCell::new(); -pub(crate) static POSTGRES: OnceCell = OnceCell::new(); -pub(crate) static MYSQL: OnceCell = OnceCell::new(); -pub(crate) static SQLITE: OnceCell = OnceCell::new(); - -#[derive(Clone, Deserialize)] -#[serde(rename_all = "lowercase", tag = "type", content = "config")] -pub enum DatabaseBackend { - Redis(backends::redis::RedisConfig), - Cassandra(backends::cql::DatabaseConfig), - Postgres(backends::sql::DatabaseConfig), - MySQL(backends::sql::DatabaseConfig), - Sqlite(backends::sql::DatabaseConfig), -} - -macro_rules! 
acquire { - ( $e:expr ) => {{ - $e.get().expect("backend not initialised") - }}; -} - -#[derive(Copy, Clone, StateData)] -pub enum StorageBackend { - Redis, - Cassandra, - Postgres, - MySQL, - Sqlite, -} - -#[async_trait] -impl ImageStore for StorageBackend { - async fn get_image( - &self, - file_id: Uuid, - preset: String, - category: &str, - format: ImageFormat, - ) -> Option { - match self { - Self::Redis => { - acquire!(REDIS) - .get_image(file_id, preset, category, format) - .await - }, - Self::Cassandra => { - acquire!(CASSANDRA) - .get_image(file_id, preset, category, format) - .await - }, - Self::Postgres => { - acquire!(POSTGRES) - .get_image(file_id, preset, category, format) - .await - }, - Self::MySQL => { - acquire!(MYSQL) - .get_image(file_id, preset, category, format) - .await - }, - Self::Sqlite => { - acquire!(SQLITE) - .get_image(file_id, preset, category, format) - .await - }, - } - } - - async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> { - let res = match self { - Self::Redis => acquire!(REDIS).add_image(file_id, category, data).await, - Self::Cassandra => acquire!(CASSANDRA).add_image(file_id, category, data).await, - Self::Postgres => acquire!(POSTGRES).add_image(file_id, category, data).await, - Self::MySQL => acquire!(MYSQL).add_image(file_id, category, data).await, - Self::Sqlite => acquire!(SQLITE).add_image(file_id, category, data).await, - }; - - if let Err(e) = &res { - error!("failed to add image {:?}", e); - } - - res - } - - async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> { - let res = match self { - Self::Redis => acquire!(REDIS).remove_image(file_id, presets).await, - Self::Cassandra => acquire!(CASSANDRA).remove_image(file_id, presets).await, - Self::Postgres => acquire!(POSTGRES).remove_image(file_id, presets).await, - Self::MySQL => acquire!(MYSQL).remove_image(file_id, presets).await, - Self::Sqlite => acquire!(SQLITE).remove_image(file_id, presets).await, - }; - - if let Err(e) = &res { - error!("failed to remove image {:?}", e); - } - - res - } - - async fn list_entities( - &self, - filter: FilterType, - order: OrderBy, - page: usize, - ) -> Result> { - let res = match self { - Self::Redis => acquire!(REDIS).list_entities(filter, order, page).await, - Self::Cassandra => acquire!(CASSANDRA).list_entities(filter, order, page).await, - Self::Postgres => acquire!(POSTGRES).list_entities(filter, order, page).await, - Self::MySQL => acquire!(MYSQL).list_entities(filter, order, page).await, - Self::Sqlite => acquire!(SQLITE).list_entities(filter, order, page).await, - }; - - if let Err(e) = &res { - error!("failed to list images {:?}", e); - } - - res - } -} diff --git a/src/traits.rs b/src/traits.rs deleted file mode 100644 index b4d2f76..0000000 --- a/src/traits.rs +++ /dev/null @@ -1,34 +0,0 @@ -use anyhow::Result; -use async_trait::async_trait; -use bytes::BytesMut; -use uuid::Uuid; - -use crate::context::{FilterType, IndexResult, OrderBy}; -use crate::image::{ImageFormat, ImagePresetsData}; - -#[async_trait] -pub trait DatabaseLinker { - async fn ensure_tables(&mut self, presets: Vec<&str>, columns: Vec) -> Result<()>; -} - -#[async_trait] -pub trait ImageStore { - async fn get_image( - &self, - file_id: Uuid, - preset: String, - category: &str, - format: ImageFormat, - ) -> Option; - - async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()>; - - async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()>; - - async 
fn list_entities( - &self, - filter: FilterType, - order: OrderBy, - page: usize, - ) -> Result>; -} diff --git a/tests/backend_runners/keydb/docker-compose.yml b/tests/backend_runners/keydb/docker-compose.yml deleted file mode 100644 index 6992869..0000000 --- a/tests/backend_runners/keydb/docker-compose.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: '3' - -services: - keydb: - image: eqalpha/keydb:latest - container_name: some-keydb - ports: - - "6379:6379" - volumes: - - ./storage:/data - - ./keydb.conf:/etc/keydb/keydb.conf \ No newline at end of file diff --git a/tests/backend_runners/keydb/keydb.conf b/tests/backend_runners/keydb/keydb.conf deleted file mode 100644 index fbe2ff6..0000000 --- a/tests/backend_runners/keydb/keydb.conf +++ /dev/null @@ -1,1574 +0,0 @@ -# KeyDB configuration file example. -# -# Note that in order to read the configuration file, KeyDB must be -# started with the file path as first argument: -# -# ./keydb-server /path/to/keydb.conf - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all KeyDB servers but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or KeyDB Sentinel. Since KeyDB always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. -# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# include /path/to/local.conf -# include /path/to/other.conf - -################################## MODULES ##################################### - -# Load modules at startup. If the server is not able to load modules -# it will abort. It is possible to use multiple loadmodule directives. -# -# loadmodule /path/to/my_module.so -# loadmodule /path/to/other_module.so - -################################## NETWORK ##################################### - -# By default, if no "bind" configuration directive is specified, KeyDB listens -# for connections from all the network interfaces available on the server. -# It is possible to listen to just one or multiple selected interfaces using -# the "bind" configuration directive, followed by one or more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 127.0.0.1 ::1 -# -# ~~~ WARNING ~~~ If the computer running KeyDB is directly exposed to the -# internet, binding to all the interfaces is dangerous and will expose the -# instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force KeyDB to listen only into -# the IPv4 loopback interface address (this means KeyDB will be able to -# accept connections only from clients running into the same computer it -# is running). -# -# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# JUST COMMENT THE FOLLOWING LINE. 
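Back in the Rust code being removed: `src/storage.rs` and `src/traits.rs` above pair a set of `OnceCell` globals (one per backend) with a `Copy` enum whose `ImageStore` impl forwards every call to whichever cell was initialised, via the `acquire!` macro. A stripped-down, synchronous sketch of that shape, assuming `once_cell` 1.x; the backend structs and the simplified `get_image` signature here are placeholders, not the real Redis/Cassandra/SQL clients:

```rust
use once_cell::sync::OnceCell; // once_cell = "1"

// Placeholder backends standing in for the real database clients.
struct RedisBackend;
struct SqliteBackend;

trait ImageStore {
    fn get_image(&self, file_id: &str) -> Option<Vec<u8>>;
}

impl ImageStore for RedisBackend {
    fn get_image(&self, _file_id: &str) -> Option<Vec<u8>> { Some(vec![1, 2, 3]) }
}
impl ImageStore for SqliteBackend {
    fn get_image(&self, _file_id: &str) -> Option<Vec<u8>> { None }
}

// One global cell per backend, filled exactly once at startup.
static REDIS: OnceCell<RedisBackend> = OnceCell::new();
static SQLITE: OnceCell<SqliteBackend> = OnceCell::new();

/// The selector the handlers carry around; it only records *which* cell to use.
#[derive(Copy, Clone)]
enum StorageBackend {
    Redis,
    Sqlite,
}

impl ImageStore for StorageBackend {
    fn get_image(&self, file_id: &str) -> Option<Vec<u8>> {
        match self {
            Self::Redis => REDIS.get().expect("backend not initialised").get_image(file_id),
            Self::Sqlite => SQLITE.get().expect("backend not initialised").get_image(file_id),
        }
    }
}

fn main() {
    let _ = REDIS.set(RedisBackend);
    let store = StorageBackend::Redis;
    println!("{:?}", store.get_image("some-uuid"));
}
```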
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -bind 0.0.0.0 - -# Protected mode is a layer of security protection, in order to avoid that -# KeyDB instances left open on the internet are accessed and exploited. -# -# When protected mode is on and if: -# -# 1) The server is not binding explicitly to a set of addresses using the -# "bind" directive. -# 2) No password is configured. -# -# The server only accepts connections from clients connecting from the -# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain -# sockets. -# -# By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to KeyDB -# even if no authentication is configured, nor a specific set of interfaces -# are explicitly listed using the "bind" directive. -protected-mode yes - -# Accept connections on the specified port, default is 6379 (IANA #815344). -# If port 0 is specified KeyDB will not listen on a TCP socket. -port 6379 - -# TCP listen() backlog. -# -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog 511 - -# Unix socket. -# -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so KeyDB will not listen -# on a unix socket when not specified. -# -# unixsocket /tmp/keydb.sock -# unixsocketperm 700 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 300 seconds, which is the new -# KeyDB default starting with Redis 3.2.1. -tcp-keepalive 300 - -################################# GENERAL ##################################### - -# By default KeyDB does not run as a daemon. Use 'yes' if you need it. -# Note that KeyDB will write a pid file in /var/run/keydb.pid when daemonized. -daemonize no - -# If you run KeyDB from upstart or systemd, KeyDB can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting KeyDB into SIGSTOP mode -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. -supervised no - -# If a pid file is specified, KeyDB writes it where specified at startup -# and removes it at exit. -# -# When the server runs non daemonized, no pid file is created if none is -# specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/keydb.pid". 
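Since the deleted docker-compose.yml above publishes this KeyDB instance on 6379 and the config binds 0.0.0.0, a quick way to sanity-check the test runner is to connect from Rust. A minimal sketch, assuming the `redis` crate the old backend depended on (a 0.20-era version) and a container already running locally:

```rust
fn main() -> redis::RedisResult<()> {
    // bind 0.0.0.0:6379 with no requirepass set, so a local connection
    // without a password is accepted.
    let client = redis::Client::open("redis://127.0.0.1:6379/")?;
    let mut con = client.get_connection()?;

    // A PING/PONG round trip proves the container and port mapping are up.
    let pong: String = redis::cmd("PING").query(&mut con)?;
    assert_eq!(pong, "PONG");
    println!("KeyDB reachable: {}", pong);
    Ok(())
}
```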
-# -# Creating a pid file is best effort: if KeyDB is not able to create it -# nothing bad happens, the server will start and run normally. -pidfile /var/run/keydb_6379.pid - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel notice - -# Specify the log file name. Also the empty string can be used to force -# KeyDB to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile "" - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -# syslog-enabled no - -# Specify the syslog identity. -# syslog-ident keydb - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -# By default KeyDB shows an ASCII art logo only when started to log to the -# standard output and if the standard output is a TTY. Basically this means -# that normally a logo is displayed only in interactive sessions. -# -# However it is possible to force the pre-4.0 behavior and always show a -# ASCII art logo in startup logs by setting the following option to yes. -always-show-logo yes - -################################ SNAPSHOTTING ################################ -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving completely by commenting out all "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -save 900 1 -save 300 10 -save 60 10000 - -# By default KeyDB will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process will start working again KeyDB will -# automatically allow writes again. -# -# However if you have setup your proper monitoring of the KeyDB server -# and persistence, you may want to disable this feature so that KeyDB will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 
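The `save` points above (900 1, 300 10, 60 10000) can be inspected, and a snapshot forced, at runtime; a small sketch, again assuming the `redis` crate and a local KeyDB started from this config:

```rust
fn main() -> redis::RedisResult<()> {
    let client = redis::Client::open("redis://127.0.0.1:6379/")?;
    let mut con = client.get_connection()?;

    // CONFIG GET replies with a [name, value] pair; for this file the value
    // should read "900 1 300 10 60 10000".
    let save: Vec<String> = redis::cmd("CONFIG").arg("GET").arg("save").query(&mut con)?;
    println!("configured save points: {:?}", save);

    // Force an RDB snapshot in the background, independent of the save points.
    let status: String = redis::cmd("BGSAVE").query(&mut con)?;
    println!("{}", status); // typically "Background saving started"
    Ok(())
}
```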
-# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Replica replication. Use replicaof to make a KeyDB instance a copy of -# another KeyDB server. A few things to understand ASAP about KeyDB replication. -# -# +------------------+ +---------------+ -# | Master | ---> | Replica | -# | (receive writes) | | (exact copy) | -# +------------------+ +---------------+ -# -# 1) KeyDB replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of replicas. -# 2) KeyDB replicas are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition replicas automatically try to reconnect to masters -# and resynchronize with them. -# -# replicaof - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the replica to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the replica request. -# -# masterauth -# -# However this is not enough if you are using KeyDB ACLs (for Redis version -# 6 or greater), and the default user is not capable of running the PSYNC -# command and/or other commands needed for replication. In this case it's -# better to configure a special user to use with replication, and specify the -# masteruser configuration as such: -# -# masteruser -# -# When masteruser is specified, the replica will authenticate against its -# master using the new AUTH form: AUTH . - -# When a replica loses its connection with the master, or when the replication -# is still in progress, the replica can act in two different ways: -# -# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if replica-serve-stale-data is set to 'no' the replica will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, -# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, -# COMMAND, POST, HOST: and LATENCY. -# -replica-serve-stale-data yes - -# You can configure a replica instance to accept writes or not. 
Writing against -# a replica instance may be useful to store some ephemeral data (because data -# written on a replica will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default replicas are read-only. -# -# Note: read only replicas are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only replica exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve -# security of read only replicas using 'rename-command' to shadow all the -# administrative / dangerous commands. -replica-read-only yes - -# Replication SYNC strategy: disk or socket. -# -# ------------------------------------------------------- -# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY -# ------------------------------------------------------- -# -# New replicas and reconnecting replicas that are not able to continue the replication -# process just receiving differences, need to do what is called a "full -# synchronization". An RDB file is transmitted from the master to the replicas. -# The transmission can happen in two different ways: -# -# 1) Disk-backed: The KeyDB master creates a new process that writes the RDB -# file on disk. Later the file is transferred by the parent -# process to the replicas incrementally. -# 2) Diskless: The KeyDB master creates a new process that directly writes the -# RDB file to replica sockets, without touching the disk at all. -# -# With disk-backed replication, while the RDB file is generated, more replicas -# can be queued and served with the RDB file as soon as the current child producing -# the RDB file finishes its work. With diskless replication instead once -# the transfer starts, new replicas arriving will be queued and a new transfer -# will start when the current one terminates. -# -# When diskless replication is used, the master waits a configurable amount of -# time (in seconds) before starting the transfer in the hope that multiple replicas -# will arrive and the transfer can be parallelized. -# -# With slow disks and fast (large bandwidth) networks, diskless replication -# works better. -repl-diskless-sync no - -# When diskless replication is enabled, it is possible to configure the delay -# the server waits in order to spawn the child that transfers the RDB via socket -# to the replicas. -# -# This is important since once the transfer starts, it is not possible to serve -# new replicas arriving, that will be queued for the next RDB transfer, so the server -# waits a delay in order to let more replicas arrive. -# -# The delay is specified in seconds, and by default is 5 seconds. To disable -# it entirely just set it to 0 seconds and the transfer will start ASAP. -repl-diskless-sync-delay 5 - -# Replicas send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_replica_period option. The default value is 10 -# seconds. -# -# repl-ping-replica-period 10 - -# The following option sets the replication timeout for: -# -# 1) Bulk transfer I/O during SYNC, from the point of view of replica. -# 2) Master timeout from the point of view of replicas (data, pings). -# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). 
-# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-replica-period otherwise a timeout will be detected -# every time there is low traffic between the master and the replica. -# -# repl-timeout 60 - -# Disable TCP_NODELAY on the replica socket after SYNC? -# -# If you select "yes" KeyDB will use a smaller number of TCP packets and -# less bandwidth to send data to replicas. But this can add a delay for -# the data to appear on the replica side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the replica side will -# be reduced but more bandwidth will be used for replication. -# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and replicas are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay no - -# Set the replication backlog size. The backlog is a buffer that accumulates -# replica data when replicas are disconnected for some time, so that when a replica -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the replica missed while -# disconnected. -# -# The bigger the replication backlog, the longer the time the replica can be -# disconnected and later be able to perform a partial resynchronization. -# -# The backlog is only allocated once there is at least a replica connected. -# -# repl-backlog-size 1mb - -# After a master has no longer connected replicas for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last replica disconnected, for -# the backlog buffer to be freed. -# -# Note that replicas never free the backlog for timeout, since they may be -# promoted to masters later, and should be able to correctly "partially -# resynchronize" with the replicas: hence they should always accumulate backlog. -# -# A value of 0 means to never release the backlog. -# -# repl-backlog-ttl 3600 - -# The replica priority is an integer number published by KeyDB in the INFO output. -# It is used by KeyDB Sentinel in order to select a replica to promote into a -# master if the master is no longer working correctly. -# -# A replica with a low priority number is considered better for promotion, so -# for instance if there are three replicas with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the replica as not able to perform the -# role of master, so a replica with priority of 0 will never be selected by -# KeyDB Sentinel for promotion. -# -# By default the priority is 100. -replica-priority 100 - -# It is possible for a master to stop accepting writes if there are less than -# N replicas connected, having a lag less or equal than M seconds. -# -# The N replicas need to be in "online" state. -# -# The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the replica, that is usually sent every second. -# -# This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough replicas -# are available, to the specified number of seconds. 
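min-replicas-to-write bounds that exposure window on the server side; a client can also ask for explicit acknowledgement per write with the WAIT command that KeyDB inherits from Redis. A hedged sketch (asking for 0 replicas makes it a no-op against the single-node compose setup above):

```rust
use redis::Commands;

fn main() -> redis::RedisResult<()> {
    let client = redis::Client::open("redis://127.0.0.1:6379/")?;
    let mut con = client.get_connection()?;

    let _: () = con.set("lust:example", "payload")?;

    // WAIT <numreplicas> <timeout-ms>: block until that many replicas have
    // acknowledged the write or the timeout expires; returns the count reached.
    let acked: i64 = redis::cmd("WAIT").arg(0).arg(100).query(&mut con)?;
    println!("write acknowledged by {} replicas", acked);
    Ok(())
}
```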
-# -# For example to require at least 3 replicas with a lag <= 10 seconds use: -# -# min-replicas-to-write 3 -# min-replicas-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-replicas-to-write is set to 0 (feature disabled) and -# min-replicas-max-lag is set to 10. - -# A KeyDB master is able to list the address and port of the attached -# replicas in different ways. For example the "INFO replication" section -# offers this information, which is used, among other tools, by -# KeyDB Sentinel in order to discover replica instances. -# Another place where this info is available is in the output of the -# "ROLE" command of a master. -# -# The listed IP and address normally reported by a replica is obtained -# in the following way: -# -# IP: The address is auto detected by checking the peer address -# of the socket used by the replica to connect with the master. -# -# Port: The port is communicated by the replica during the replication -# handshake, and is normally the port that the replica is using to -# listen for connections. -# -# However when port forwarding or Network Address Translation (NAT) is -# used, the replica may be actually reachable via different IP and port -# pairs. The following two options can be used by a replica in order to -# report to its master a specific set of IP and port, so that both INFO -# and ROLE will report those values. -# -# There is no need to use both the options if you need to override just -# the port or the IP address. -# -# replica-announce-ip 5.5.5.5 -# replica-announce-port 1234 - -################################## SECURITY ################################### - -# Warning: since KeyDB is pretty fast an outside user can try up to -# 1 million passwords per second against a modern box. This means that you -# should use very strong passwords, otherwise they will be very easy to break. -# Note that because the password is really a shared secret between the client -# and the server, and should not be memorized by any human, the password -# can be easily a long string from /dev/urandom or whatever, so by using a -# long and unguessable password no brute force attack will be possible. - -# KeyDB ACL users are defined in the following format: -# -# user ... acl rules ... -# -# For example: -# -# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 -# -# The special username "default" is used for new connections. If this user -# has the "nopass" rule, then new connections will be immediately authenticated -# as the "default" user without the need of any password provided via the -# AUTH command. Otherwise if the "default" user is not flagged with "nopass" -# the connections will start in not authenticated state, and will require -# AUTH (or the HELLO command AUTH option) in order to be authenticated and -# start to work. -# -# The ACL rules that describe what an user can do are the following: -# -# on Enable the user: it is possible to authenticate as this user. -# off Disable the user: it's no longer possible to authenticate -# with this user, however the already authenticated connections -# will still work. -# + Allow the execution of that command -# - Disallow the execution of that command -# +@ Allow the execution of all the commands in such category -# with valid categories are like @admin, @set, @sortedset, ... -# and so forth, see the full list in the server.c file where -# the KeyDB command table is described and defined. 
-# The special category @all means all the commands, but currently -# present in the server, and that will be loaded in the future -# via modules. -# +|subcommand Allow a specific subcommand of an otherwise -# disabled command. Note that this form is not -# allowed as negative like -DEBUG|SEGFAULT, but -# only additive starting with "+". -# allcommands Alias for +@all. Note that it implies the ability to execute -# all the future commands loaded via the modules system. -# nocommands Alias for -@all. -# ~ Add a pattern of keys that can be mentioned as part of -# commands. For instance ~* allows all the keys. The pattern -# is a glob-style pattern like the one of KEYS. -# It is possible to specify multiple patterns. -# allkeys Alias for ~* -# resetkeys Flush the list of allowed keys patterns. -# > Add this passowrd to the list of valid password for the user. -# For example >mypass will add "mypass" to the list. -# This directive clears the "nopass" flag (see later). -# < Remove this password from the list of valid passwords. -# nopass All the set passwords of the user are removed, and the user -# is flagged as requiring no password: it means that every -# password will work against this user. If this directive is -# used for the default user, every new connection will be -# immediately authenticated with the default user without -# any explicit AUTH command required. Note that the "resetpass" -# directive will clear this condition. -# resetpass Flush the list of allowed passwords. Moreover removes the -# "nopass" status. After "resetpass" the user has no associated -# passwords and there is no way to authenticate without adding -# some password (or setting it as "nopass" later). -# reset Performs the following actions: resetpass, resetkeys, off, -# -@all. The user returns to the same state it has immediately -# after its creation. -# -# ACL rules can be specified in any order: for instance you can start with -# passwords, then flags, or key patterns. However note that the additive -# and subtractive rules will CHANGE MEANING depending on the ordering. -# For instance see the following example: -# -# user alice on +@all -DEBUG ~* >somepassword -# -# This will allow "alice" to use all the commands with the exception of the -# DEBUG command, since +@all added all the commands to the set of the commands -# alice can use, and later DEBUG was removed. However if we invert the order -# of two ACL rules the result will be different: -# -# user alice on -DEBUG +@all ~* >somepassword -# -# Now DEBUG was removed when alice had yet no commands in the set of allowed -# commands, later all the commands are added, so the user will be able to -# execute everything. -# -# Basically ACL rules are processed left-to-right. -# -# For more information about ACL configuration please refer to -# the Redis web site at https://redis.io/topics/acl - -# Using an external ACL file -# -# Instead of configuring users here in this file, it is possible to use -# a stand-alone file just listing users. The two methods cannot be mixed: -# if you configure users here and at the same time you activate the exteranl -# ACL file, the server will refuse to start. -# -# The format of the external ACL user file is exactly the same as the -# format that is used inside keydb.conf to describe users. -# -# aclfile /etc/keydb/users.acl - -# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatiblity -# layer on top of the new ACL system. The option effect will be just setting -# the password for the default user. 
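If requirepass (or an ACL password on the default user) were enabled for the test instance, the old redis-crate backend would authenticate simply by putting the secret in the connection URL. A sketch with a hypothetical password:

```rust
fn main() -> redis::RedisResult<()> {
    // "hunter2" is a placeholder; the keydb.conf above ships with requirepass
    // commented out, so the default compose setup needs no auth at all.
    let client = redis::Client::open("redis://:hunter2@127.0.0.1:6379/")?;
    let mut con = client.get_connection()?;
    let pong: String = redis::cmd("PING").query(&mut con)?;
    println!("{}", pong);
    Ok(())
}
```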
Clients will still authenticate using -# AUTH as usually, or more explicitly with AUTH default -# if they follow the new protocol: both will work. -# -# requirepass foobared - -# Command renaming (DEPRECATED). -# -# ------------------------------------------------------------------------ -# WARNING: avoid using this option if possible. Instead use ACLs to remove -# commands from the default user, and put them only in some admin user you -# create for administrative purposes. -# ------------------------------------------------------------------------ -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to replicas may cause problems. - -################################### CLIENTS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the KeyDB server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as KeyDB reserves a few file descriptors for internal uses). -# -# Once the limit is reached KeyDB will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -############################## MEMORY MANAGEMENT ################################ - -# Set a memory usage limit to the specified amount of bytes. -# When the memory limit is reached KeyDB will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). -# -# If KeyDB can't remove keys according to the policy, or if the policy is -# set to 'noeviction', KeyDB will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using KeyDB as an LRU or LFU cache, or to -# set a hard memory limit for an instance (using the 'noeviction' policy). -# -# WARNING: If you have replicas attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the replicas are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of replicas is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have replicas attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for replica -# output buffers (but this is not needed if the policy is 'noeviction'). -# -maxmemory 1gb - -# MAXMEMORY POLICY: how KeyDB will select what to remove when maxmemory -# is reached. You can select among five behaviors: -# -# volatile-lru -> Evict using approximated LRU among the keys with an expire set. -# allkeys-lru -> Evict any key using approximated LRU. -# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. 
-# allkeys-lfu -> Evict any key using approximated LFU. -# volatile-random -> Remove a random key among the ones with an expire set. -# allkeys-random -> Remove a random key, any key. -# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) -# noeviction -> Don't evict anything, just return an error on write operations. -# -# LRU means Least Recently Used -# LFU means Least Frequently Used -# -# Both LRU, LFU and volatile-ttl are implemented using approximated -# randomized algorithms. -# -# Note: with any of the above policies, KeyDB will return an error on write -# operations, when there are no suitable keys for eviction. -# -# At the date of writing these commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -maxmemory-policy volatile-lru - -# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can tune it for speed or -# accuracy. For default KeyDB will check five keys and pick the one that was -# used less recently, you can change the sample size using the following -# configuration directive. -# -# The default of 5 produces good enough results. 10 Approximates very closely -# true LRU but costs more CPU. 3 is faster but not very accurate. -# -maxmemory-samples 5 - -# Starting from Redis 5, by default a replica will ignore its maxmemory setting -# (unless it is promoted to master after a failover or manually). It means -# that the eviction of keys will be just handled by the master, sending the -# DEL commands to the replica as keys evict in the master side. -# -# This behavior ensures that masters and replicas stay consistent, and is usually -# what you want, however if your replica is writable, or you want the replica to have -# a different memory setting, and you are sure all the writes performed to the -# replica are idempotent, then you may change this default (but be sure to understand -# what you are doing). -# -# Note that since the replica by default does not evict, it may end using more -# memory than the one set via maxmemory (there are certain buffers that may -# be larger on the replica, or data structures may sometimes take more memory and so -# forth). So make sure you monitor your replicas and make sure they have enough -# memory to never hit a real out-of-memory condition before the master hits -# the configured maxmemory setting. -# -# replica-ignore-maxmemory yes - -############################# LAZY FREEING #################################### - -# KeyDB has two primitives to delete keys. One is called DEL and is a blocking -# deletion of the object. It means that the server stops processing new commands -# in order to reclaim all the memory associated with an object in a synchronous -# way. If the key deleted is associated with a small object, the time needed -# in order to execute the DEL command is very small and comparable to most other -# O(1) or O(log_N) commands in KeyDB. However if the key is associated with an -# aggregated value containing millions of elements, the server can block for -# a long time (even seconds) in order to complete the operation. 
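Because this file pins maxmemory 1gb with maxmemory-policy volatile-lru, the instance behaves as a bounded cache for keys that carry a TTL. The policy can be flipped at runtime for experiments, for example to allkeys-lru; a small sketch using CONFIG SET/GET via the `redis` crate:

```rust
fn main() -> redis::RedisResult<()> {
    let client = redis::Client::open("redis://127.0.0.1:6379/")?;
    let mut con = client.get_connection()?;

    // Switch eviction to consider every key, not just those with an expiry.
    let _: () = redis::cmd("CONFIG")
        .arg("SET")
        .arg("maxmemory-policy")
        .arg("allkeys-lru")
        .query(&mut con)?;

    // CONFIG GET replies with a [name, value] pair.
    let policy: Vec<String> = redis::cmd("CONFIG")
        .arg("GET")
        .arg("maxmemory-policy")
        .query(&mut con)?;
    println!("{:?}", policy); // ["maxmemory-policy", "allkeys-lru"]
    Ok(())
}
```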
-# -# For the above reasons KeyDB also offers non blocking deletion primitives -# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and -# FLUSHDB commands, in order to reclaim memory in background. Those commands -# are executed in constant time. Another thread will incrementally free the -# object in the background as fast as possible. -# -# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. -# It's up to the design of the application to understand when it is a good -# idea to use one or the other. However the KeyDB server sometimes has to -# delete keys or flush the whole database as a side effect of other operations. -# Specifically KeyDB deletes objects independently of a user call in the -# following scenarios: -# -# 1) On eviction, because of the maxmemory and maxmemory policy configurations, -# in order to make room for new data, without going over the specified -# memory limit. -# 2) Because of expire: when a key with an associated time to live (see the -# EXPIRE command) must be deleted from memory. -# 3) Because of a side effect of a command that stores data on a key that may -# already exist. For example the RENAME command may delete the old key -# content when it is replaced with another one. Similarly SUNIONSTORE -# or SORT with STORE option may delete existing keys. The SET command -# itself removes any old content of the specified key in order to replace -# it with the specified string. -# 4) During replication, when a replica performs a full resynchronization with -# its master, the content of the whole database is removed in order to -# load the RDB file just transferred. -# -# In all the above cases the default is to delete objects in a blocking way, -# like if DEL was called. However you can configure each case specifically -# in order to instead release memory in a non-blocking way like if UNLINK -# was called, using the following configuration directives: - -lazyfree-lazy-eviction no -lazyfree-lazy-expire no -lazyfree-lazy-server-del no -replica-lazy-flush no - -############################## APPEND ONLY MODE ############################### - -# By default KeyDB asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the KeyDB process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability. For instance using the default data fsync policy -# (see later in the config file) KeyDB can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the KeyDB process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup KeyDB will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. - -appendonly no - -# The name of the append only file (default: "appendonly.aof") - -appendfilename "appendonly.aof" - -# The fsync() call tells the Operating System to actually write data on disk -# instead of waiting for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# KeyDB supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. 
-# always: fsync after every write to the append only log. Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# KeyDB may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of KeyDB is -# the same as "appendfsync none". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. - -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# KeyDB is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: KeyDB remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -# An AOF file may be found to be truncated at the end during the KeyDB -# startup process, when the AOF data gets loaded back into memory. -# This may happen when the system where KeyDB is running -# crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when KeyDB itself -# crashes or aborts but the operating system still works correctly). -# -# KeyDB can either exit with an error when this happens, or load as much -# data as possible (the default now) and start if the AOF file is found -# to be truncated at the end. The following option controls this behavior. -# -# If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the KeyDB server starts emitting a log to inform the user of the event. 
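With appendonly no, the compose instance relies purely on the RDB save points; AOF can be switched on and compacted at runtime if the tests ever need stronger durability. A sketch, hedged as above:

```rust
fn main() -> redis::RedisResult<()> {
    let client = redis::Client::open("redis://127.0.0.1:6379/")?;
    let mut con = client.get_connection()?;

    // Turn the append-only file on without restarting the container...
    let _: () = redis::cmd("CONFIG").arg("SET").arg("appendonly").arg("yes").query(&mut con)?;

    // ...and ask for an immediate rewrite so the AOF starts out compact.
    let status: String = redis::cmd("BGREWRITEAOF").query(&mut con)?;
    println!("{}", status);
    Ok(())
}
```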
-# Otherwise if the option is set to no, the server aborts with an error -# and refuses to start. When the option is set to no, the user requires -# to fix the AOF file using the "keydb-check-aof" utility before to restart -# the server. -# -# Note that if the AOF file will be found to be corrupted in the middle -# the server will still exit with an error. This option only applies when -# KeyDB will try to read more data from the AOF file but not enough bytes -# will be found. -aof-load-truncated yes - -# When rewriting the AOF file, KeyDB is able to use an RDB preamble in the -# AOF file for faster rewrites and recoveries. When this option is turned -# on the rewritten AOF file is composed of two different stanzas: -# -# [RDB file][AOF tail] -# -# When loading KeyDB recognizes that the AOF file starts with the "REDIS" -# string and loads the prefixed RDB file, and continues loading the AOF -# tail. -aof-use-rdb-preamble yes - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached KeyDB will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceeds the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write command was -# already issued by the script but the user doesn't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 5000 - -################################ KEYDB CLUSTER ############################### - -# Normal KeyDB instances can't be part of a KeyDB Cluster; only nodes that are -# started as cluster nodes can. In order to start a KeyDB instance as a -# cluster node enable the cluster support uncommenting the following: -# -# cluster-enabled yes - -# Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by KeyDB nodes. -# Every KeyDB Cluster node requires a different cluster configuration file. -# Make sure that instances running in the same system do not have -# overlapping cluster configuration file names. -# -# cluster-config-file nodes-6379.conf - -# Cluster node timeout is the amount of milliseconds a node must be unreachable -# for it to be considered in failure state. -# Most other internal time limits are multiple of the node timeout. -# -# cluster-node-timeout 15000 - -# A replica of a failing master will avoid to start a failover if its data -# looks too old. -# -# There is no simple way for a replica to actually have an exact measure of -# its "data age", so the following two checks are performed: -# -# 1) If there are multiple replicas able to failover, they exchange messages -# in order to try to give an advantage to the replica with the best -# replication offset (more data from the master processed). -# Replicas will try to get their rank by offset, and apply to the start -# of the failover a delay proportional to their rank. -# -# 2) Every single replica computes the time of the last interaction with -# its master. 
This can be the last ping or command received (if the master -# is still in the "connected" state), or the time that elapsed since the -# disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the replica will not try to failover -# at all. -# -# The point "2" can be tuned by user. Specifically a replica will not perform -# the failover if, since the last interaction with the master, the time -# elapsed is greater than: -# -# (node-timeout * replica-validity-factor) + repl-ping-replica-period -# -# So for example if node-timeout is 30 seconds, and the replica-validity-factor -# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the -# replica will not try to failover if it was not able to talk with the master -# for longer than 310 seconds. -# -# A large replica-validity-factor may allow replicas with too old data to failover -# a master, while a too small value may prevent the cluster from being able to -# elect a replica at all. -# -# For maximum availability, it is possible to set the replica-validity-factor -# to a value of 0, which means, that replicas will always try to failover the -# master regardless of the last time they interacted with the master. -# (However they'll always try to apply a delay proportional to their -# offset rank). -# -# Zero is the only value able to guarantee that when all the partitions heal -# the cluster will always be able to continue. -# -# cluster-replica-validity-factor 10 - -# Cluster replicas are able to migrate to orphaned masters, that are masters -# that are left without working replicas. This improves the cluster ability -# to resist to failures as otherwise an orphaned master can't be failed over -# in case of failure if it has no working replicas. -# -# Replicas migrate to orphaned masters only if there are still at least a -# given number of other working replicas for their old master. This number -# is the "migration barrier". A migration barrier of 1 means that a replica -# will migrate only if there is at least 1 other working replica for its master -# and so forth. It usually reflects the number of replicas you want for every -# master in your cluster. -# -# Default is 1 (replicas migrate only if their masters remain with at least -# one replica). To disable migration just set it to a very large value. -# A value of 0 can be set but is useful only for debugging and dangerous -# in production. -# -# cluster-migration-barrier 1 - -# By default KeyDB Cluster nodes stop accepting queries if they detect there -# is at least an hash slot uncovered (no available node is serving it). -# This way if the cluster is partially down (for example a range of hash slots -# are no longer covered) all the cluster becomes, eventually, unavailable. -# It automatically returns available as soon as all the slots are covered again. -# -# However sometimes you want the subset of the cluster which is working, -# to continue to accept queries for the part of the key space that is still -# covered. In order to do so, just set the cluster-require-full-coverage -# option to no. -# -# cluster-require-full-coverage yes - -# This option, when set to yes, prevents replicas from trying to failover its -# master during master failures. However the master can still perform a -# manual failover, if forced to do so. 
-# -# This is useful in different scenarios, especially in the case of multiple -# data center operations, where we want one side to never be promoted if not -# in the case of a total DC failure. -# -# cluster-replica-no-failover no - -# In order to setup your cluster make sure to read the documentation -# available at http://redis.io web site. - -########################## CLUSTER DOCKER/NAT support ######################## - -# In certain deployments, KeyDB Cluster nodes address discovery fails, because -# addresses are NAT-ted or because ports are forwarded (the typical case is -# Docker and other containers). -# -# In order to make KeyDB Cluster working in such environments, a static -# configuration where each node knows its public address is needed. The -# following two options are used for this scope, and are: -# -# * cluster-announce-ip -# * cluster-announce-port -# * cluster-announce-bus-port -# -# Each instruct the node about its address, client port, and cluster message -# bus port. The information is then published in the header of the bus packets -# so that other nodes will be able to correctly map the address of the node -# publishing the information. -# -# If the above options are not used, the normal KeyDB Cluster auto-detection -# will be used instead. -# -# Note that when remapped, the bus port may not be at the fixed offset of -# clients port + 10000, so you can specify any port and bus-port depending -# on how they get remapped. If the bus-port is not set, a fixed offset of -# 10000 will be used as usually. -# -# Example: -# -# cluster-announce-ip 10.1.1.5 -# cluster-announce-port 6379 -# cluster-announce-bus-port 6380 - -################################## SLOW LOG ################################### - -# The KeyDB Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells KeyDB -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -################################ LATENCY MONITOR ############################## - -# The KeyDB latency monitoring subsystem samples different operations -# at runtime in order to collect data related to possible sources of -# latency of a KeyDB instance. -# -# Via the LATENCY command this information is available to the user that can -# print graphs and obtain reports. -# -# The system only logs operations that were performed in a time equal or -# greater than the amount of milliseconds specified via the -# latency-monitor-threshold configuration directive. When its value is set -# to zero, the latency monitor is turned off. 
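The 10 ms slowlog threshold above is easy to probe from a client: reset the log, run the workload, then read the captured entries back. A sketch; SLOWLOG GET returns nested arrays, so the result is read into `redis::Value` here rather than a typed struct:

```rust
fn main() -> redis::RedisResult<()> {
    let client = redis::Client::open("redis://127.0.0.1:6379/")?;
    let mut con = client.get_connection()?;

    let _: () = redis::cmd("SLOWLOG").arg("RESET").query(&mut con)?;

    // ... run the image-server workload under test here ...

    let count: i64 = redis::cmd("SLOWLOG").arg("LEN").query(&mut con)?;
    let entries: redis::Value = redis::cmd("SLOWLOG").arg("GET").arg(10).query(&mut con)?;
    println!("{} slow commands captured: {:?}", count, entries);
    Ok(())
}
```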
-# -# By default latency monitoring is disabled since it is mostly not needed -# if you don't have latency issues, and collecting data has a performance -# impact, that while very small, can be measured under big load. Latency -# monitoring can easily be enabled at runtime using the command -# "CONFIG SET latency-monitor-threshold " if needed. -latency-monitor-threshold 0 - -############################# EVENT NOTIFICATION ############################## - -# KeyDB can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in the Database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that KeyDB will notify among a set -# of classes. Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@__ prefix. -# E Keyevent events, published with __keyevent@__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# A Alias for g$lshzxe, so that the "AKE" string means all the events. -# -# The "notify-keyspace-events" takes as argument a string that is composed -# of zero or multiple characters. The empty string means that notifications -# are disabled. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys subscribing to channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" - -############################### GOPHER SERVER ################################# - -# KeyDB contains an implementation of the Gopher protocol, as specified in -# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt). -# -# The Gopher protocol was very popular in the late '90s. It is an alternative -# to the web, and the implementation both server and client side is so simple -# that the KeyDB server has just 100 lines of code in order to implement this -# support. -# -# What do you do with Gopher nowadays? Well Gopher never *really* died, and -# lately there is a movement in order for the Gopher more hierarchical content -# composed of just plain text documents to be resurrected. Some want a simpler -# internet, others believe that the mainstream internet became too much -# controlled, and it's cool to create an alternative space for people that -# want a bit of fresh air. -# -# Anyway for the 10nth birthday of the KeyDB, we gave it the Gopher protocol -# as a gift. -# -# --- HOW IT WORKS? --- -# -# The KeyDB Gopher support uses the inline protocol of KeyDB, and specifically -# two kind of inline requests that were anyway illegal: an empty request -# or any request that starts with "/" (there are no KeyDB commands starting -# with such a slash). 
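To make the expired-keys notification example above concrete, here is a short sketch (illustrative only, with the same assumed redis-py client against a local instance using database 0) that enables "Ex" notifications and listens on the documented channel:

import time

import redis  # third-party redis-py client; assumed, not part of this repository

r = redis.Redis(host="127.0.0.1", port=6379)

# Same effect as "notify-keyspace-events Ex" above: keyevent notifications
# for expirations only.
r.config_set("notify-keyspace-events", "Ex")

p = r.pubsub()
p.psubscribe("__keyevent@0__:expired")  # channel named in the comment above

r.set("demo-key", "value", px=100)      # hypothetical key, expires after 100 ms
time.sleep(0.5)

# Drain pending messages; the expired key name arrives as the message payload.
while (msg := p.get_message(timeout=1.0)) is not None:
    if msg["type"] == "pmessage":
        print("expired:", msg["data"])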
Normal RESP2/RESP3 requests are completely out of the -# path of the Gopher protocol implementation and are served as usually as well. -# -# If you open a connection to KeyDB when Gopher is enabled and send it -# a string like "/foo", if there is a key named "/foo" it is served via the -# Gopher protocol. -# -# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher -# talking), you likely need a script like the following: -# -# https://github.com/antirez/gopher2redis -# -# --- SECURITY WARNING --- -# -# If you plan to put KeyDB on the internet in a publicly accessible address -# to server Gopher pages MAKE SURE TO SET A PASSWORD to the instance. -# Once a password is set: -# -# 1. The Gopher server (when enabled, not by default) will kill serve -# content via Gopher. -# 2. However other commands cannot be called before the client will -# authenticate. -# -# So use the 'requirepass' option to protect your instance. -# -# To enable Gopher support uncomment the following line and set -# the option from no (the default) to yes. -# -# gopher-enabled no - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-ziplist-entries 512 -hash-max-ziplist-value 64 - -# Lists are also encoded in a special way to save a lot of space. -# The number of entries allowed per internal list node can be specified -# as a fixed maximum size or a maximum number of elements. -# For a fixed maximum size, use -5 through -1, meaning: -# -5: max size: 64 Kb <-- not recommended for normal workloads -# -4: max size: 32 Kb <-- not recommended -# -3: max size: 16 Kb <-- probably not recommended -# -2: max size: 8 Kb <-- good -# -1: max size: 4 Kb <-- good -# Positive numbers mean store up to _exactly_ that number of elements -# per list node. -# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), -# but if your use case is unique, adjust the settings as necessary. -list-max-ziplist-size -2 - -# Lists may also be compressed. -# Compress depth is the number of quicklist ziplist nodes from *each* side of -# the list to *exclude* from compression. The head and tail of the list -# are always uncompressed for fast push/pop operations. Settings are: -# 0: disable all list compression -# 1: depth 1 means "don't start compressing until after 1 node into the list, -# going from either the head or tail" -# So: [head]->node->node->...->node->[tail] -# [head], [tail] will always be uncompressed; inner nodes will compress. -# 2: [head]->[next]->node->node->...->node->[prev]->[tail] -# 2 here means: don't compress head or head->next or tail->prev or tail, -# but compress all nodes between them. -# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] -# etc. -list-compress-depth 0 - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happen to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries 512 - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. 
This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries 128 -zset-max-ziplist-value 64 - -# HyperLogLog sparse representation bytes limit. The limit includes the -# 16 bytes header. When an HyperLogLog using the sparse representation crosses -# this limit, it is converted into the dense representation. -# -# A value greater than 16000 is totally useless, since at that point the -# dense representation is more memory efficient. -# -# The suggested value is ~ 3000 in order to have the benefits of -# the space efficient encoding without slowing down too much PFADD, -# which is O(N) with the sparse encoding. The value can be raised to -# ~ 10000 when CPU is not a concern, but space is, and the data set is -# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. -hll-sparse-max-bytes 3000 - -# Streams macro node max size / items. The stream data structure is a radix -# tree of big nodes that encode multiple items inside. Using this configuration -# it is possible to configure how big a single node can be in bytes, and the -# maximum number of items it may contain before switching to a new node when -# appending new stream entries. If any of the following settings are set to -# zero, the limit is ignored, so for instance it is possible to set just a -# max entires limit by setting max-bytes to 0 and max-entries to the desired -# value. -stream-node-max-bytes 4096 -stream-node-max-entries 100 - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main KeyDB hash table (the one mapping top-level -# keys to values). The hash table implementation KeyDB uses (see dict.c) -# performs a lazy rehashing: the more operation you run into a hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# actively rehash the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that KeyDB can reply from time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing yes - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# replica -> replica clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). 
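The directive takes a client class followed by a hard limit, a soft limit, and a soft-limit duration in seconds; it can also be applied at runtime with CONFIG SET. A sketch, again assuming a local instance and the redis-py client, mirroring the pubsub values that appear further down:

import redis  # third-party redis-py client; assumed for illustration

r = redis.Redis(host="127.0.0.1", port=6379)

# Apply the pubsub class limits at runtime:
# hard 32 MB, soft 8 MB sustained for 60 seconds.
r.config_set("client-output-buffer-limit", "pubsub 32mb 8mb 60")
print(r.config_get("client-output-buffer-limit"))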
-# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. -# -# Instead there is a default limit for pubsub and replica clients, since -# subscribers and replicas receive data in a push fashion. -# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit replica 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Client query buffers accumulate new commands. They are limited to a fixed -# amount by default in order to avoid that a protocol desynchronization (for -# instance due to a bug in the client) will lead to unbound memory usage in -# the query buffer. However you can configure it here if you have very special -# needs, such us huge multi/exec requests or alike. -# -# client-query-buffer-limit 1gb - -# In the KeyDB protocol, bulk requests, that are, elements representing single -# strings, are normally limited ot 512 mb. However you can change this limit -# here. -# -# proto-max-bulk-len 512mb - -# KeyDB calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but KeyDB checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# KeyDB is idle, but at the same time will make KeyDB more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# Normally it is useful to have an HZ value which is proportional to the -# number of clients connected. This is useful in order, for instance, to -# avoid too many clients are processed for each background task invocation -# in order to avoid latency spikes. -# -# Since the default HZ value by default is conservatively set to 10, KeyDB -# offers, and enables by default, the ability to use an adaptive HZ value -# which will temporary raise when there are many connected clients. -# -# When dynamic HZ is enabled, the actual configured HZ will be used as -# as a baseline, but multiples of the configured HZ value will be actually -# used as needed once more clients are connected. In this way an idle -# instance will use very little CPU time while a busy instance will be -# more responsive. -dynamic-hz yes - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. 
-aof-rewrite-incremental-fsync yes - -# When KeyDB saves RDB file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -rdb-save-incremental-fsync yes - -# KeyDB LFU eviction (see maxmemory setting) can be tuned. However it is a good -# idea to start with the default settings and only change them after investigating -# how to improve the performances and how the keys LFU change over time, which -# is possible to inspect via the OBJECT FREQ command. -# -# There are two tunable parameters in the KeyDB LFU implementation: the -# counter logarithm factor and the counter decay time. It is important to -# understand what the two parameters mean before changing them. -# -# The LFU counter is just 8 bits per key, it's maximum value is 255, so KeyDB -# uses a probabilistic increment with logarithmic behavior. Given the value -# of the old counter, when a key is accessed, the counter is incremented in -# this way: -# -# 1. A random number R between 0 and 1 is extracted. -# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). -# 3. The counter is incremented only if R < P. -# -# The default lfu-log-factor is 10. This is a table of how the frequency -# counter changes with a different number of accesses with different -# logarithmic factors: -# -# +--------+------------+------------+------------+------------+------------+ -# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | -# +--------+------------+------------+------------+------------+------------+ -# | 0 | 104 | 255 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 1 | 18 | 49 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 10 | 10 | 18 | 142 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 100 | 8 | 11 | 49 | 143 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# -# NOTE: The above table was obtained by running the following commands: -# -# keydb-benchmark -n 1000000 incr foo -# keydb-cli object freq foo -# -# NOTE 2: The counter initial value is 5 in order to give new objects a chance -# to accumulate hits. -# -# The counter decay time is the time, in minutes, that must elapse in order -# for the key counter to be divided by two (or decremented if it has a value -# less <= 10). -# -# The default value for the lfu-decay-time is 1. A Special value of 0 means to -# decay the counter every time it happens to be scanned. -# -# lfu-log-factor 10 -# lfu-decay-time 1 - -########################### ACTIVE DEFRAGMENTATION ####################### -# -# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested -# even in production and manually tested by multiple engineers for some -# time. -# -# What is active defragmentation? -# ------------------------------- -# -# Active (online) defragmentation allows a KeyDB server to compact the -# spaces left between small allocations and deallocations of data in memory, -# thus allowing to reclaim back memory. -# -# Fragmentation is a natural process that happens with every allocator (but -# less so with Jemalloc, fortunately) and certain workloads. Normally a server -# restart is needed in order to lower the fragmentation, or at least to flush -# away all the data and create it again. 
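The three increment steps above are easy to simulate. A rough sketch of the documented scheme only; the server's actual counter update differs in small details, so the benchmark table will not be reproduced exactly:

import random

def lfu_increment(counter: int, lfu_log_factor: int = 10) -> int:
    """One probabilistic LFU increment, following steps 1-3 above."""
    if counter >= 255:                       # the counter is 8 bits, capped at 255
        return counter
    p = 1.0 / (counter * lfu_log_factor + 1)
    return counter + 1 if random.random() < p else counter

def counter_after_hits(hits: int, lfu_log_factor: int = 10) -> int:
    counter = 5                              # new objects start at 5 (NOTE 2 above)
    for _ in range(hits):
        counter = lfu_increment(counter, lfu_log_factor)
    return counter

# With the default factor of 10, growth slows down sharply as the counter rises.
print(counter_after_hits(100), counter_after_hits(1_000), counter_after_hits(100_000))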
However thanks to this feature -# implemented by Oran Agra for Redis 4.0 this process can happen at runtime -# in an "hot" way, while the server is running. -# -# Basically when the fragmentation is over a certain level (see the -# configuration options below) KeyDB will start to create new copies of the -# values in contiguous memory regions by exploiting certain specific Jemalloc -# features (in order to understand if an allocation is causing fragmentation -# and to allocate it in a better place), and at the same time, will release the -# old copies of the data. This process, repeated incrementally for all the keys -# will cause the fragmentation to drop back to normal values. -# -# Important things to understand: -# -# 1. This feature is disabled by default, and only works if you compiled KeyDB -# to use the copy of Jemalloc we ship with the source code of KeyDB. -# This is the default with Linux builds. -# -# 2. You never need to enable this feature if you don't have fragmentation -# issues. -# -# 3. Once you experience fragmentation, you can enable this feature when -# needed with the command "CONFIG SET activedefrag yes". -# -# The configuration parameters are able to fine tune the behavior of the -# defragmentation process. If you are not sure about what they mean it is -# a good idea to leave the defaults untouched. - -# Enabled active defragmentation -# activedefrag yes - -# Minimum amount of fragmentation waste to start active defrag -# active-defrag-ignore-bytes 100mb - -# Minimum percentage of fragmentation to start active defrag -# active-defrag-threshold-lower 10 - -# Maximum percentage of fragmentation at which we use maximum effort -# active-defrag-threshold-upper 100 - -# Minimal effort for defrag in CPU percentage -# active-defrag-cycle-min 5 - -# Maximal effort for defrag in CPU percentage -# active-defrag-cycle-max 75 - -# Maximum number of set/hash/zset/list fields that will be processed from -# the main dictionary scan -# active-defrag-max-scan-fields 1000 - -# The minimum number of clients on a thread before KeyDB assigns new connections to a different thread -# Tuning this parameter is a tradeoff between locking overhead and distributing the workload over multiple cores -# min-clients-per-thread 50 - -# Path to directory for file backed scratchpad. The file backed scratchpad -# reduces memory requirements by storing rarely accessed data on disk -# instead of RAM. A temporary file will be created in this directory. -# scratch-file-path /tmp/ - -# Number of worker threads serving requests. This number should be related to the performance -# of your network hardware, not the number of cores on your machine. We don't recommend going -# above 4 at this time. By default this is set 1. -server-threads 2 - -# Should KeyDB pin threads to CPUs? By default this is disabled, and KeyDB will not bind threads. -# When enabled threads are bount to cores sequentially starting at core 0. -# server-thread-affinity true - -# Uncomment the option below to enable Active Active support. Note that -# replicas will still sync in the normal way and incorrect ordering when -# bringing up replicas can result in data loss (the first master will win). -# active-replica yes - -# Enable Enterprise? KeyDB Enterprise provides support for Enterprise only features -# note: you may omit the license key to demo Enterprise features for a limited time -# enable-enterprise [License Key] - -# Enable FLASH support? 
(Enterprise Only) -# storage-provider flash /path/to/flash/db diff --git a/tests/backend_runners/maria/docker-compose.yml b/tests/backend_runners/maria/docker-compose.yml deleted file mode 100644 index 1a43e84..0000000 --- a/tests/backend_runners/maria/docker-compose.yml +++ /dev/null @@ -1,13 +0,0 @@ -version: '3' - -services: - mariadb: - image: mariadb:latest - container_name: some-maria - ports: - - "3306:3306" - environment: - - MARIADB_ROOT_PASSWORD=admin - - MARIADB_DATABASE=maria - volumes: - - ./storage:/var/lib/mysql diff --git a/tests/backend_runners/scylla/docker-compose.yml b/tests/backend_runners/scylla/docker-compose.yml deleted file mode 100644 index c102e4d..0000000 --- a/tests/backend_runners/scylla/docker-compose.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: '3' - -services: - some-scylla: - image: scylladb/scylla - container_name: some-scylla - volumes: - - ./storage/n1:/var/lib/scylla - ports: - - "9042:9042" - - some-scylla2: - image: scylladb/scylla - container_name: some-scylla2 - command: --seeds=some-scylla - volumes: - - ./storage/n2:/var/lib/scylla - - some-scylla3: - image: scylladb/scylla - container_name: some-scylla3 - command: --seeds=some-scylla - volumes: - - ./storage/n3:/var/lib/scylla \ No newline at end of file diff --git a/tests/nginx/Dockerfile b/tests/nginx/Dockerfile deleted file mode 100644 index 47c690b..0000000 --- a/tests/nginx/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM nginx:latest - -# Remove the default Nginx configuration file -RUN rm -v /etc/nginx/nginx.conf - -# Copy a configuration file from the current directory -ADD nginx.conf /etc/nginx/ - -ADD sample /usr/share/nginx/html/ -ADD sample /var/www/html/ - -# Expose ports -EXPOSE 90 diff --git a/tests/nginx/nginx.conf b/tests/nginx/nginx.conf deleted file mode 100644 index 128d000..0000000 --- a/tests/nginx/nginx.conf +++ /dev/null @@ -1,61 +0,0 @@ -user www-data; -worker_processes auto; -pid /run/nginx.pid; -include /etc/nginx/modules-enabled/*.conf; - -events { - worker_connections 1024; -} - -http { - ## - # Basic Settings - ## - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - # server_tokens off; - - # server_names_hash_bucket_size 64; - # server_name_in_redirect off; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - ## - # SSL Settings - ## - - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE - ssl_prefer_server_ciphers on; - - ## - # Logging Settings - ## - - access_log /var/log/nginx/access.log; - error_log /var/log/nginx/error.log; - - ## - # Gzip Settings - ## - - gzip on; - - # gzip_vary on; - # gzip_proxied any; - # gzip_comp_level 6; - # gzip_buffers 16 8k; - # gzip_http_version 1.1; - # gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript; - - - server { - root /usr/share/nginx/html/; - autoindex on; - listen 90; - } -} \ No newline at end of file diff --git a/tests/nginx/sample/news.png b/tests/nginx/sample/news.png deleted file mode 100644 index 5a71a1d..0000000 Binary files a/tests/nginx/sample/news.png and /dev/null differ diff --git a/tests/noqa/expand.py b/tests/noqa/expand.py deleted file mode 100644 index c847a71..0000000 --- a/tests/noqa/expand.py +++ /dev/null @@ -1,40 +0,0 @@ -import base64 -import aiohttp -import asyncio - -queue = asyncio.Queue() - - -def get_base_data(file: str) -> str: - with open(file, "rb") as file: - data = file.read() - return 
base64.standard_b64encode(data).decode("utf-8") - - -async def task(): - data = get_base_data("./samples/news.png") - async with aiohttp.ClientSession() as sess: - while not queue.empty(): - _ = await queue.get() - async with sess.post( - "http://127.0.0.1:7070/admin/create/image", - json={"format": "png", "data": data} - ) as resp: - assert resp.status == 200 - await asyncio.sleep(0.2) - - -async def main(): - for _ in range(200_000): - queue.put_nowait(None) - - tasks = [task() for _ in range(1)] - t = asyncio.ensure_future(asyncio.gather(*tasks)) - - while not queue.empty() and not t.done(): - print(f"currently, {queue.qsize()} in queue") - await asyncio.sleep(1) - - -if __name__ == '__main__': - asyncio.run(main()) diff --git a/tests/samples/news.png b/tests/samples/news.png deleted file mode 100644 index 5a71a1d..0000000 Binary files a/tests/samples/news.png and /dev/null differ diff --git a/tests/samples/release.png b/tests/samples/release.png deleted file mode 100644 index cf6ab90..0000000 Binary files a/tests/samples/release.png and /dev/null differ diff --git a/tests/unit.py b/tests/unit.py deleted file mode 100644 index 7d9cd41..0000000 --- a/tests/unit.py +++ /dev/null @@ -1,109 +0,0 @@ -import base64 -import requests -import uuid - -working_ids = {} - - -def get_base_data(file: str) -> str: - with open(file, "rb") as file: - data = file.read() - print(f"original {len(data)}") - return base64.standard_b64encode(data).decode("utf-8") - - -def test_png_upload1(): - global working_ids - data = get_base_data("./samples/sunset.jpeg") - payload = { - "format": "jpeg", - "data": data, - } - r = requests.post("http://127.0.0.1:7070/admin/create/image", json=payload) - data = r.json() - - assert r.status_code == 200 - assert data['data']['category'] == "default" - - file_id = data['data']['file_id'] - working_ids['default'] = file_id - print(file_id) - - -def test_get_img_default(): - r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}") - assert r.status_code == 200 - - -def test_get_img_preset_webp(): - r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=webp") - assert r.status_code == 200 - - -def test_get_img_preset_png(): - r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=png") - assert r.status_code == 200 - - -def test_get_img_preset_jpeg(): - r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=jpeg") - assert r.status_code == 200 - - -def test_get_img_format_gif(): - r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=gif") - assert r.status_code == 200 - - -def test_get_img_preset_large(): - r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?preset=large") - assert r.status_code == 200 - - -def test_get_img_preset_medium(): - r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?preset=medium") - assert r.status_code == 200 - - -def test_get_img_preset_small(): - r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?preset=small") - assert r.status_code == 200 - - -def test_get_nothing1(): - r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}") - assert r.status_code == 404 - - -def test_get_nothing2(): - r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=png") - assert r.status_code == 404 - - -def test_get_nothing3(): - r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=jpeg") - assert r.status_code == 404 - - -def test_get_nothing4(): 
-    r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=webp")
-    assert r.status_code == 404
-
-
-def test_get_nothing5():
-    r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=gif")
-    assert r.status_code == 404
-
-
-def test_remove_img1():
-    r = requests.delete(
-        f"http://127.0.0.1:7070/admin/delete/image/44524a33-c505-476d-b23b-c42de1fd796a")
-    print(r.content)
-    assert r.status_code == 200
-
-
-if __name__ == '__main__':
-    test_png_upload1()
-    test_get_img_default()
-    test_get_nothing1()
-    # test_remove_img1()
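If these smoke tests are ever resurrected, the repetitive per-format functions in the deleted tests/unit.py collapse naturally into parametrized pytest cases. A sketch only, assuming the same locally running server, sample images, and endpoints that the deleted script targeted (pytest and requests are not dependencies of this repository):

import base64
import uuid

import pytest
import requests

BASE = "http://127.0.0.1:7070"


def get_base_data(path: str) -> str:
    # Same helper as in the deleted script: base64-encode a sample image.
    with open(path, "rb") as f:
        return base64.standard_b64encode(f.read()).decode("utf-8")


@pytest.fixture(scope="session")
def file_id():
    # Upload one sample image up front, mirroring test_png_upload1 above.
    payload = {"format": "jpeg", "data": get_base_data("./samples/sunset.jpeg")}
    r = requests.post(f"{BASE}/admin/create/image", json=payload)
    assert r.status_code == 200
    return r.json()["data"]["file_id"]


@pytest.mark.parametrize("query", [
    "", "?format=webp", "?format=png", "?format=jpeg", "?format=gif",
    "?preset=large", "?preset=medium", "?preset=small",
])
def test_get_image_variants(file_id, query):
    r = requests.get(f"{BASE}/images/{file_id}{query}")
    assert r.status_code == 200


@pytest.mark.parametrize("query", [
    "", "?format=png", "?format=jpeg", "?format=webp", "?format=gif",
])
def test_get_missing_image(query):
    r = requests.get(f"{BASE}/images/{uuid.uuid4()}{query}")
    assert r.status_code == 404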