Clear and update base handlers

Harrison Burt 2022-03-26 20:27:32 +00:00
parent 4f561c2adc
commit 74844df51a
27 changed files with 11 additions and 4348 deletions

View File

@@ -1,8 +1,8 @@
[package]
name = "lust"
version = "1.3.5"
version = "2.0.0"
authors = ["Harrison Burt <57491488+ChillFish8@users.noreply.github.com>"]
edition = "2018"
edition = "2021"
documentation = "getting-started.md"
readme = "README.md"
license = "MIT"
@@ -14,36 +14,18 @@ description = "A fast, auto-optimising image server designed for multiple backen
[dependencies]
webp = { version = "*", path = "./webp" }
image = "0.23"
image = "0.24.1"
base64 = "0.13.0"
bytes = "1"
anyhow = "1"
clap = "2"
clap = "3"
serde_json = "1"
serde_variant = "0.1.0"
async-trait = "0.1.50"
once_cell = "1.7.2"
concread = "0.2.14"
async-trait = "0.1"
once_cell = "1.10.0"
futures = "0.3"
log = "0.4.14"
pretty_env_logger = "0.4.0"
gotham = "0.6.0"
gotham_derive = "0.6.0"
headers = "0.3"
tokio = { version = "1", features = ["full"] }
serde = { version = "1", features = ["derive"] }
chrono = { version = "0.4", features = ["serde"] }
uuid = { version = "0.8.2", features = ["serde", "v4"] }
hashbrown = { version = "0.11.2", features = ["serde"] }
sqlx = { version = "0.5", features = [ "runtime-tokio-rustls", "mysql", "sqlite", "postgres", "chrono", "uuid" ] }
redis = { version = "0.20", features = ["tokio-comp", "connection-manager"] }
scylla = "0.2.1"
[profile.release]
lto = "fat"
codegen-units = 1

View File

@@ -1,8 +1,8 @@
FROM rust:slim-buster as build
WORKDIR /code
WORKDIR /app
COPY . /code
COPY . /app
RUN cargo build --release
@@ -10,7 +10,7 @@ RUN cargo build --release
FROM debian:buster-slim
WORKDIR /etc/lust
COPY --from=build /code/target/release/lust /
COPY --from=build /app/target/release/lust /
USER root
ENTRYPOINT ["./lust", "run"]

View File

@@ -3,4 +3,5 @@ combine_control_expr = false
imports_layout = "HorizontalVertical"
match_block_trailing_comma = true
imports_granularity = "Module"
group_imports = "StdExternalCrate"
group_imports = "StdExternalCrate"
max_width = 89

View File

@@ -1,379 +0,0 @@
use anyhow::Result;
use async_trait::async_trait;
use bytes::{Bytes, BytesMut};
use chrono::{DateTime, NaiveDateTime, Utc};
use hashbrown::HashMap;
use log::{debug, info, warn};
use scylla::query::Query;
use scylla::statement::prepared_statement::PreparedStatement;
use scylla::transport::session::Session;
use scylla::{QueryResult, SessionBuilder};
use serde::{Deserialize, Serialize};
use serde_variant::to_variant_name;
use uuid::Uuid;
use crate::configure::PAGE_SIZE;
use crate::context::{FilterType, IndexResult, OrderBy};
use crate::image::{ImageFormat, ImagePresetsData};
use crate::traits::{DatabaseLinker, ImageStore};
/// Represents a connection pool session with a round robin load balancer.
type CurrentSession = Session;
type PagedRow = (Uuid, String, i64, i32);
#[derive(Clone, Serialize, Deserialize)]
#[serde(tag = "strategy", content = "spec")]
enum ReplicationClass {
SimpleStrategy(SimpleNode),
NetworkTopologyStrategy(Vec<DataCenterNode>),
}
#[derive(Clone, Serialize, Deserialize)]
struct SimpleNode {
replication_factor: usize,
}
#[derive(Clone, Serialize, Deserialize)]
struct DataCenterNode {
node_name: String,
replication: usize,
}
/// The configuration for a cassandra database.
///
/// Each cluster should be given in the `host:port` format and
/// should only be the main node (not replication nodes).
///
/// The replication_factor is used when the keyspace is first created;
/// if the keyspace already exists, changes to this value may be ignored
/// due to current implementation limitations.
///
/// The replication_class is used when the keyspace is first created,
/// this has the same caveats as the replication_factor.
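///
/// An illustrative config matching this shape (all values are placeholders;
/// see `configure::Config::template` for the generated default):
///
/// ```json
/// {
///     "clusters": ["127.0.0.1:9042"],
///     "keyspace": {
///         "strategy": "SimpleStrategy",
///         "spec": { "replication_factor": 3 }
///     },
///     "user": "",
///     "password": ""
/// }
/// ```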
#[derive(Clone, Deserialize)]
pub struct DatabaseConfig {
clusters: Vec<String>,
keyspace: ReplicationClass,
user: String,
password: String,
}
macro_rules! log_and_convert_error {
( $e:expr ) => {{
match $e {
Ok(frame) => Some(frame),
Err(e) => {
warn!("failed to execute query {:?}", e);
None
},
}
}};
}
async fn get_page(
filter: &FilterType,
session: &CurrentSession,
stmt: &PreparedStatement,
page_state: Option<Bytes>,
) -> Result<QueryResult> {
Ok(match &filter {
FilterType::All => session.execute_paged(stmt, &[], page_state).await?,
FilterType::CreationDate(v) => {
session
.execute_paged(stmt, (v.to_string(),), page_state)
.await?
},
FilterType::Category(v) => session.execute_paged(stmt, (v,), page_state).await?,
})
}
/// A cassandra database backend.
pub struct Backend {
session: CurrentSession,
check_cat: Option<PreparedStatement>,
get_file: HashMap<String, HashMap<String, PreparedStatement>>,
}
impl Backend {
pub async fn connect(cfg: DatabaseConfig) -> Result<Self> {
info!("connecting to database");
let session = SessionBuilder::new()
.user(cfg.user, cfg.password)
.known_nodes(cfg.clusters.as_ref())
.build()
.await?;
info!("connect successful");
let replication = match cfg.keyspace {
ReplicationClass::SimpleStrategy(node) => {
format!(
"'class': 'SimpleStrategy', 'replication_factor': {}",
node.replication_factor,
)
},
ReplicationClass::NetworkTopologyStrategy(mut nodes) => {
let mut spec = nodes
.drain(..)
.map(|v| format!("'{}': {}", v.node_name, v.replication))
.collect::<Vec<String>>();
spec.insert(0, "'class' : 'NetworkTopologyStrategy'".to_string());
spec.join(", ")
},
};
let create_ks = format!(
"CREATE KEYSPACE IF NOT EXISTS lust_ks WITH REPLICATION = {{{}}};",
replication
);
debug!("creating keyspace {}", &create_ks);
let _ = session.query(create_ks, &[]).await?;
info!("keyspace ensured");
Ok(Self {
session,
check_cat: None,
get_file: HashMap::new(),
})
}
}
#[async_trait]
impl DatabaseLinker for Backend {
async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec<ImageFormat>) -> Result<()> {
info!("building tables");
let query = r#"
CREATE TABLE IF NOT EXISTS lust_ks.image_metadata (
file_id UUID,
category TEXT,
insert_date TIMESTAMP,
total_size BIGINT,
PRIMARY KEY ((file_id), category)
) WITH CLUSTERING ORDER BY (category DESC);
"#;
self.session.query(query, &[]).await?;
info!("metadata table created successfully");
let query = r#"
CREATE INDEX IF NOT EXISTS ON lust_ks.image_metadata (category);
"#;
self.session.query(query, &[]).await?;
info!("metadata table index created successfully");
let mut columns = vec!["file_id UUID PRIMARY KEY".to_string()];
for format in formats.iter() {
let column = to_variant_name(format).expect("unreachable");
columns.push(format!("{} BLOB", column))
}
for preset in presets {
let query = format!(
"CREATE TABLE IF NOT EXISTS lust_ks.{table} ({columns})",
table = preset,
columns = columns.join(", ")
);
self.session.query(query, &[]).await?;
debug!("created preset table {}", preset);
for format in formats.iter() {
let column = to_variant_name(format).expect("unreachable");
let qry = format!(
"SELECT {column} FROM lust_ks.{table} WHERE file_id = ? LIMIT 1;",
column = column,
table = preset,
);
let prepared = self.session.prepare(qry).await?;
debug!("prepared check query {:?}", format);
if let Some(tbl) = self.get_file.get_mut(preset) {
tbl.insert(column.to_string(), prepared);
} else {
let mut new_map = HashMap::new();
new_map.insert(column.to_string(), prepared);
self.get_file.insert(preset.to_string(), new_map);
}
}
}
info!("tables created");
let qry = r#"
SELECT file_id FROM lust_ks.image_metadata
WHERE file_id = ? AND category = ?;
"#;
let prepared = self.session.prepare(qry).await?;
self.check_cat = Some(prepared);
info!("prepared all queries and tables");
Ok(())
}
}
#[async_trait]
impl ImageStore for Backend {
async fn get_image(
&self,
file_id: Uuid,
preset: String,
category: &str,
format: ImageFormat,
) -> Option<BytesMut> {
let prepared = self.check_cat.as_ref().unwrap();
let query_result =
log_and_convert_error!(self.session.execute(prepared, (file_id, category)).await)?;
let _ = query_result.rows?;
let column = to_variant_name(&format).expect("unreachable");
let prepared = self.get_file.get(&preset)?.get(column)?;
let query_result =
log_and_convert_error!(self.session.execute(prepared, (file_id,)).await)?;
let mut rows = query_result.rows?;
let row = rows.pop()?;
let (data,) = log_and_convert_error!(row.into_typed::<(Vec<u8>,)>())?;
let ref_: &[u8] = data.as_ref();
Some(BytesMut::from(ref_))
}
async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
let mut total: i64 = 0;
for (preset, preset_data) in data {
let sum: i64 = preset_data.values().map(|v| v.len() as i64).sum();
total += sum;
let columns: String = preset_data
.keys()
.map(|v| to_variant_name(v).expect("unreachable"))
.collect::<Vec<&str>>()
.join(", ");
let placeholders: String = (0..preset_data.len())
.map(|_| "?")
.collect::<Vec<&str>>()
.join(", ");
let mut values: Vec<Vec<u8>> = preset_data.values().map(|v| v.to_vec()).collect();
values.insert(0, file_id.as_bytes().to_vec());
let qry = format!(
"INSERT INTO lust_ks.{table} (file_id, {columns}) VALUES (?, {placeholders});",
table = preset,
columns = columns,
placeholders = placeholders,
);
let prepared = self.session.prepare(qry).await?;
self.session.execute(&prepared, values).await?;
}
let qry = r#"
INSERT INTO lust_ks.image_metadata (
file_id,
category,
insert_date,
total_size
) VALUES (?, ?, ?, ?);"#;
let now = Utc::now();
self.session
.query(qry, (file_id, category, now.timestamp(), total))
.await?;
Ok(())
}
async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> {
for preset in presets {
let qry = format!(
"DELETE FROM lust_ks.{table} WHERE file_id = ?;",
table = preset,
);
self.session
.query(qry, (file_id.as_bytes().to_vec(),))
.await?;
}
let qry = "DELETE FROM lust_ks.image_metadata WHERE file_id = ?;";
self.session.query(qry, (file_id,)).await?;
Ok(())
}
async fn list_entities(
&self,
filter: FilterType,
_order: OrderBy,
page: usize,
) -> Result<Vec<IndexResult>> {
let qry = r#"
SELECT file_id, category, insert_date, total_size
FROM lust_ks.image_metadata
"#
.to_string();
let mut query = match &filter {
FilterType::All => {
let qry = format!("{};", qry);
Query::new(qry)
},
FilterType::CreationDate(_) => {
let qry = format!("{} WHERE insert_date = ?;", qry);
Query::new(qry)
},
FilterType::Category(_) => {
let qry = format!("{} WHERE category = ?;", qry);
Query::new(qry)
},
};
query.set_page_size(PAGE_SIZE as i32);
let prepared = self.session.prepare(query).await?;
let mut page_state = None;
for _ in 0..page - 1 {
let rows = get_page(&filter, &self.session, &prepared, page_state.clone()).await?;
page_state = rows.paging_state;
}
let target_rows = get_page(&filter, &self.session, &prepared, page_state.clone()).await?;
let results = if let Some(mut rows) = target_rows.rows {
rows.drain(..)
.map(|r| {
let r = r
.into_typed::<PagedRow>()
.expect("database format invalidated");
IndexResult {
file_id: r.0,
category: r.1,
created_on: DateTime::from_utc(NaiveDateTime::from_timestamp(r.2, 0), Utc),
total_size: r.3,
}
})
.collect()
} else {
vec![]
};
Ok(results)
}
}

View File

@@ -1,3 +0,0 @@
pub mod cql;
pub mod redis;
pub mod sql;

View File

@@ -1,148 +0,0 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use anyhow::Result;
use async_trait::async_trait;
use bytes::BytesMut;
use log::error;
use redis::aio::ConnectionManager;
use redis::{AsyncCommands, AsyncIter};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::context::{FilterType, IndexResult, OrderBy};
use crate::image::{ImageFormat, ImagePresetsData};
use crate::traits::{DatabaseLinker, ImageStore};
#[derive(Clone, Serialize, Deserialize)]
pub struct RedisConfig {
connection_uri: String,
pool_size: usize,
}
struct RedisPool {
connections: Vec<ConnectionManager>,
index: AtomicUsize,
}
impl RedisPool {
pub async fn connect(cfg: RedisConfig) -> Result<Self> {
let client = redis::Client::open(cfg.connection_uri)?;
let mut conns = Vec::new();
for _ in 0..cfg.pool_size {
let conn = client.get_tokio_connection_manager().await?;
conns.push(conn);
}
Ok(Self {
connections: conns,
index: AtomicUsize::new(0),
})
}
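/// Returns the next connection from the pool, advancing a round-robin
/// index (the separate load and store are not atomic, so concurrent
/// callers may occasionally receive the same connection).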
pub fn get(&self) -> ConnectionManager {
let index = self.index.load(Ordering::Relaxed);
let conn = self.connections[index].clone();
if index == (self.connections.len() - 1) {
self.index.store(0, Ordering::Relaxed);
} else {
self.index.store(index + 1, Ordering::Relaxed);
}
conn
}
}
pub struct Backend {
pool: RedisPool,
}
impl Backend {
pub async fn connect(cfg: RedisConfig) -> Result<Self> {
let pool = RedisPool::connect(cfg).await?;
Ok(Self { pool })
}
}
#[async_trait]
impl DatabaseLinker for Backend {
/// Due to the nature of the key-value setup for redis clients this has
/// completely different handling, so nothing is done when this function
/// is called.
async fn ensure_tables(
&mut self,
_presets: Vec<&str>,
_columns: Vec<ImageFormat>,
) -> Result<()> {
Ok(())
}
}
#[async_trait]
impl ImageStore for Backend {
async fn get_image(
&self,
file_id: Uuid,
preset: String,
category: &str,
format: ImageFormat,
) -> Option<BytesMut> {
let key = format!("{:?} {} {} {:?}", file_id, preset, category, format);
let mut conn = self.pool.get();
let result = conn.get(&key).await;
let val: Vec<u8> = match result {
Ok(v) => v,
Err(e) => {
error!("failed to fetch key {} from redis: {:?}", &key, e);
return None;
},
};
if val.is_empty() {
None
} else {
let ref_: &[u8] = val.as_ref();
Some(BytesMut::from(ref_))
}
}
async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
let mut pairs = Vec::new();
for (preset, formats) in data {
for (format, buff) in formats {
let key = format!("{:?} {} {} {:?}", &file_id, &preset, category, format);
pairs.push((key, buff.to_vec()));
}
}
let mut conn = self.pool.get();
conn.set_multiple(&pairs).await?;
Ok(())
}
async fn remove_image(&self, file_id: Uuid, _presets: Vec<&String>) -> Result<()> {
let mut conn = self.pool.get();
let mut conn2 = self.pool.get();
let mut keys: AsyncIter<String> = conn.scan_match(format!("{:?}*", file_id)).await?;
while let Some(v) = keys.next_item().await {
conn2.del(v).await?;
}
Ok(())
}
/// This is non-functional due to limitations with the key-value setup of redis.
async fn list_entities(
&self,
_filter: FilterType,
_order: OrderBy,
_page: usize,
) -> Result<Vec<IndexResult>> {
Err(anyhow::Error::msg(
"redis backend does not support listing entities",
))
}
}

View File

@@ -1,676 +0,0 @@
use std::str::FromStr;
use anyhow::Result;
use async_trait::async_trait;
use bytes::BytesMut;
use chrono::Utc;
use log::{debug, error, info};
use serde::Deserialize;
use serde_variant::to_variant_name;
use sqlx::mysql::{MySqlPool, MySqlPoolOptions};
use sqlx::postgres::{PgPool, PgPoolOptions};
use sqlx::sqlite::{SqlitePool, SqlitePoolOptions};
use sqlx::Row;
use uuid::Uuid;
use crate::configure::PAGE_SIZE;
use crate::context::{FilterType, IndexResult, OrderBy};
use crate::image::{ImageFormat, ImagePresetsData};
use crate::traits::{DatabaseLinker, ImageStore};
/// The configuration for the SQL based database backends.
///
/// The `connection_uri` should be formatted as a direct connect
/// uri. e.g.
/// `postgresql://john:boo@localhost/postgres`
///
/// The `pool_size` determines the *maximum* number of pool connections.
#[derive(Clone, Deserialize)]
pub struct DatabaseConfig {
connection_uri: String,
pool_size: u32,
}
fn build_select_qry(column: &str, preset: &str, placeholder: &str) -> String {
format!(
"SELECT {column} FROM {table} WHERE file_id = {placeholder} LIMIT 1;",
column = column,
table = preset,
placeholder = placeholder,
)
}
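// For example (illustrative expansion; "png" and "medium" stand in for a
// format column and a preset table):
//
//     build_select_qry("png", "medium", "$1")
//     // => "SELECT png FROM medium WHERE file_id = $1 LIMIT 1;"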
fn build_insert_qry(preset: &str, columns: &[&str], placeholders: &[String]) -> String {
let columns = columns.join(", ");
let placeholders = placeholders.join(", ");
format!(
"INSERT INTO {table} ({columns}) VALUES ({placeholders});",
table = preset,
columns = columns,
placeholders = placeholders,
)
}
fn build_delete_queries(presets: &[&String], placeholder: &str) -> Vec<String> {
let mut queries = vec![];
for preset in presets {
queries.push(format!(
"DELETE FROM {table} WHERE file_id = {placeholder};",
table = preset,
placeholder = placeholder,
))
}
queries
}
/// Either extracts the value as a `&[u8]` from the row as `Some(BytesMut)`
/// or becomes `None`.
macro_rules! extract_or_none {
( $e:expr, $c:expr ) => {{
match $e {
Ok(row) => {
let row = row?;
let data: &[u8] = row.get($c);
Some(BytesMut::from(data))
},
Err(e) => {
error!("failed to fetch row due to error: {:?}", e);
None
},
}
}};
}
/// Builds a SQL insert query for the given preset (table) from
/// the given data, adding placeholders for each value for use in
/// prepared statements.
macro_rules! build_insert {
( $preset:expr, $data:expr, $placeholder:expr ) => {{
let mut columns: Vec<&str> = $data
.keys()
.map(|v| to_variant_name(v).expect("unreachable"))
.collect();
columns.insert(0, "file_id");
let values: Vec<BytesMut> = $data.values().map(|v| v.clone()).collect();
let placeholders: Vec<String> = (1..columns.len() + 1).map($placeholder).collect();
(build_insert_qry($preset, &columns, &placeholders), values)
}};
}
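// For example, a preset holding png and webp data with the postgres
// placeholder function expands to (illustrative; the format column order
// follows the map's iteration order):
//
//     ("INSERT INTO medium (file_id, png, webp) VALUES ($1, $2, $3);", values)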
/// Builds a sqlx query based on the given query string and values
///
/// This also accounts for the file_id being a uuid vs everything else
/// being bytes.
macro_rules! query_with_parameters {
( $id:expr, $qry:expr, $values:expr ) => {{
let mut qry = sqlx::query($qry).bind($id);
for value in $values {
qry = qry.bind(value)
}
qry
}};
}
/// Deletes a file with a given id from all presets.
///
/// Because the Pool types differ while the deletion code is nearly
/// identical across databases, it makes more sense to implement this
/// as a macro rather than a function.
macro_rules! delete_file {
( $id:expr, $presets:expr, $placeholder:expr, $pool:expr ) => {{
let file_id = $id.to_string();
let queries = build_delete_queries($presets, $placeholder);
for qry in queries {
let query = sqlx::query(&qry).bind(&file_id);
query.execute($pool).await?;
}
let qry = format!(
"DELETE FROM image_metadata WHERE file_id = {}",
$placeholder,
);
let query = sqlx::query(&qry).bind($id.to_string());
query.execute($pool).await?;
}};
}
/// Inserts a given file_id into the index table.
///
/// This table mostly acts as the metadata table for listing files of
/// given categories.
macro_rules! insert_metadata {
( $file_id:expr, $category:expr, $total:expr, $placeholder:expr, $pool:expr, ) => {{
let placeholders: String = (1..5).map($placeholder).collect::<Vec<String>>().join(", ");
let qry = format!(
r#"
INSERT INTO image_metadata (
file_id,
category,
insert_date,
total_size
) VALUES ({placeholders})"#,
placeholders = placeholders,
);
let now = Utc::now();
let query = sqlx::query(&qry)
.bind($file_id)
.bind($category)
.bind(now)
.bind($total);
query.execute($pool).await?;
}};
}
macro_rules! sum_total {
( $total:expr, $values:expr ) => {{
let sum: i64 = $values.values().map(|v| v.len() as i64).sum();
$total += sum;
}};
}
macro_rules! check_category {
( $file_id:expr, $category:expr, $ph1:expr, $ph2:expr, $pool:expr ) => {{
let qry = format!(
"SELECT 1 FROM image_metadata WHERE file_id = {} AND category = {};",
$ph1, $ph2,
);
sqlx::query(&qry)
.bind($file_id.to_string())
.bind($category)
.fetch_optional($pool)
.await
.unwrap_or(None)
}};
}
macro_rules! apply_filter {
( $qry:expr, $placeholder:expr, $filter:expr ) => {{
match $filter {
FilterType::All => (),
FilterType::Category(_) => $qry = format!("{} WHERE category = {}", $qry, $placeholder),
FilterType::CreationDate(_) => {
$qry = format!("{} WHERE insert_date = {}", $qry, $placeholder)
},
};
}};
}
macro_rules! bind_filter {
( $query:expr, $filter:expr ) => {{
match $filter {
FilterType::All => (),
FilterType::Category(v) => $query = $query.bind(v),
FilterType::CreationDate(v) => $query = $query.bind(v),
};
}};
}
macro_rules! from_rows {
( $rows:expr ) => {{
$rows
.drain(..)
.map(|v| IndexResult {
file_id: Uuid::from_str(v.get("file_id")).expect("uuid was invalid in database"),
category: v.get("category"),
total_size: v.get("total_size"),
created_on: v.get("insert_date"),
})
.collect()
}};
}
/// A database backend set to handle the PostgreSQL database.
pub struct PostgresBackend {
pool: PgPool,
}
impl PostgresBackend {
/// Connect to the given PostgreSQL server.
///
/// This will build a connection pool and connect with a maximum
/// of n connections determined by the `pool_size` of the given
/// config.
pub async fn connect(cfg: DatabaseConfig) -> Result<Self> {
let pool = PgPoolOptions::new()
.max_connections(cfg.pool_size)
.connect(&cfg.connection_uri)
.await?;
Ok(Self { pool })
}
}
#[async_trait]
impl DatabaseLinker for PostgresBackend {
async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec<ImageFormat>) -> Result<()> {
info!("building tables");
let query = sqlx::query(
r#"
CREATE TABLE IF NOT EXISTS image_metadata (
file_id CHAR(36) PRIMARY KEY,
category TEXT,
insert_date TIMESTAMP WITH TIME ZONE,
total_size INTEGER
)"#,
);
query.execute(&self.pool).await?;
let mut columns = vec!["file_id CHAR(36) PRIMARY KEY".to_string()];
for format in formats {
let column = to_variant_name(&format).expect("unreachable");
columns.push(format!("{} BYTEA", column))
}
for preset in presets {
let qry = format!(
"CREATE TABLE IF NOT EXISTS {table} ({columns})",
table = preset,
columns = columns.join(", ")
);
let query = sqlx::query(&qry);
query.execute(&self.pool).await?;
}
Ok(())
}
}
#[async_trait]
impl ImageStore for PostgresBackend {
async fn get_image(
&self,
file_id: Uuid,
preset: String,
category: &str,
format: ImageFormat,
) -> Option<BytesMut> {
check_category!(file_id, category, "$1", "$2", &self.pool)?;
let column = to_variant_name(&format).expect("unreachable");
let qry = build_select_qry(column, &preset, "$1");
let qry = sqlx::query(&qry).bind(file_id.to_string());
extract_or_none!(qry.fetch_optional(&self.pool).await, column)
}
async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
let mut total: i64 = 0;
for (preset, preset_data) in data {
sum_total!(total, preset_data);
let (qry, values) = build_insert!(&preset, preset_data, |i| format!("${}", i));
let values_ = values.iter().map(|v| v.as_ref());
let query = query_with_parameters!(file_id.to_string(), &qry, values_);
query.execute(&self.pool).await?;
}
insert_metadata!(
file_id.to_string(),
category,
total,
|i| format!("${}", i),
&self.pool,
);
Ok(())
}
async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> {
delete_file!(file_id, &presets, "$1", &self.pool);
Ok(())
}
async fn list_entities(
&self,
filter: FilterType,
order: OrderBy,
page: usize,
) -> Result<Vec<IndexResult>> {
// we start at 1 but the offset should be calculated from 0
let skip = PAGE_SIZE * (page as i64 - 1);
let order = order.as_str();
// the WHERE clause (if any) must come before the ORDER BY / OFFSET / LIMIT
// clauses for the generated SQL to be valid.
let mut qry = String::from(
"SELECT file_id, category, insert_date, total_size FROM image_metadata",
);
apply_filter!(qry, "$3", &filter);
qry = format!("{} ORDER BY {} DESC OFFSET $1 LIMIT $2", qry, order);
let mut query = sqlx::query(&qry).bind(skip).bind(PAGE_SIZE);
bind_filter!(query, filter);
let mut rows = query.fetch_all(&self.pool).await?;
let results = from_rows!(rows);
Ok(results)
}
}
/// A database backend set to handle the MySQL / MariaDB database.
pub struct MySQLBackend {
pool: MySqlPool,
}
impl MySQLBackend {
/// Connect to the given MySQL / MariaDB server.
///
/// This will build a connection pool and connect with a maximum
/// of n connections determined by the `pool_size` of the given
/// config.
pub async fn connect(cfg: DatabaseConfig) -> Result<Self> {
let pool = MySqlPoolOptions::new()
.max_connections(cfg.pool_size)
.connect(&cfg.connection_uri)
.await?;
Ok(Self { pool })
}
}
#[async_trait]
impl DatabaseLinker for MySQLBackend {
async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec<ImageFormat>) -> Result<()> {
info!("building tables");
let query = sqlx::query(
r#"
CREATE TABLE IF NOT EXISTS image_metadata (
file_id CHAR(36) PRIMARY KEY,
category TEXT,
insert_date TIMESTAMP,
total_size INTEGER
)"#,
);
query.execute(&self.pool).await?;
let mut columns = vec!["file_id CHAR(36) PRIMARY KEY".to_string()];
for format in formats {
let column = to_variant_name(&format).expect("unreachable");
columns.push(format!("{} LONGBLOB", column))
}
for preset in presets {
let qry = format!(
"CREATE TABLE IF NOT EXISTS {table} ({columns})",
table = preset,
columns = columns.join(", ")
);
let query = sqlx::query(&qry);
query.execute(&self.pool).await?;
}
Ok(())
}
}
#[async_trait]
impl ImageStore for MySQLBackend {
async fn get_image(
&self,
file_id: Uuid,
preset: String,
category: &str,
format: ImageFormat,
) -> Option<BytesMut> {
check_category!(file_id, category, "?", "?", &self.pool)?;
let column = to_variant_name(&format).expect("unreachable");
let qry = build_select_qry(column, &preset, "?");
let query = sqlx::query(&qry).bind(file_id.to_string());
extract_or_none!(query.fetch_optional(&self.pool).await, column)
}
async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
let mut total: i64 = 0;
for (preset, preset_data) in data {
sum_total!(total, preset_data);
let (qry, values) = build_insert!(&preset, preset_data, |_| "?".to_string());
let values_ = values.iter().map(|v| v.as_ref());
let query = query_with_parameters!(file_id.to_string(), &qry, values_);
query.execute(&self.pool).await?;
}
insert_metadata!(
file_id.to_string(),
category,
total,
|_| "?".to_string(),
&self.pool,
);
Ok(())
}
async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> {
delete_file!(file_id, &presets, "?", &self.pool);
Ok(())
}
async fn list_entities(
&self,
filter: FilterType,
order: OrderBy,
page: usize,
) -> Result<Vec<IndexResult>> {
// we start at 1 but the offset should be calculated from 0
let skip = PAGE_SIZE * (page as i64 - 1);
let order = order.as_str();
// the WHERE clause (if any) must come before the ORDER BY / LIMIT clauses
// for the generated SQL to be valid; with `?` placeholders the filter value
// must also be bound before the offset and limit.
let mut qry = String::from(
"SELECT file_id, category, insert_date, total_size FROM image_metadata",
);
apply_filter!(qry, "?", &filter);
qry = format!("{} ORDER BY {} DESC LIMIT ?, ?", qry, order);
let mut query = sqlx::query(&qry);
bind_filter!(query, filter);
let query = query.bind(skip).bind(PAGE_SIZE);
let mut rows = query.fetch_all(&self.pool).await?;
let results = from_rows!(rows);
Ok(results)
}
}
/// A database backend set to handle the Sqlite database.
///
/// Due to the nature of SQLite, being a single file, this is *not*
/// recommended for use in production. Consider using something like
/// PostgreSQL or Cassandra in production.
///
/// This backend requires that the system uses a standard file approach, e.g.
/// not in-memory / shared memory, due to the sqlx::Pool handling.
/// If in-memory is used this can produce undefined behaviour in terms
/// of what data is perceived to be stored.
pub struct SqliteBackend {
pool: SqlitePool,
}
impl SqliteBackend {
/// Connect to the given Sqlite file.
///
/// This will build a connection pool and connect with a maximum
/// of n connections determined by the `pool_size` of the given
/// config.
///
/// Due to the nature of this being a pool setup, in-memory setups are
/// not supported.
pub async fn connect(cfg: DatabaseConfig) -> Result<Self> {
let pool = SqlitePoolOptions::new()
.max_connections(cfg.pool_size)
.connect(&cfg.connection_uri)
.await?;
info!("successfully connected to sqlite");
Ok(Self { pool })
}
}
#[async_trait]
impl DatabaseLinker for SqliteBackend {
async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec<ImageFormat>) -> Result<()> {
info!("building tables");
let query = sqlx::query(
"
CREATE TABLE IF NOT EXISTS image_metadata (
file_id CHAR(36) PRIMARY KEY,
category TEXT,
insert_date TEXT,
total_size INTEGER
)",
);
query.execute(&self.pool).await?;
info!("metadata table created successfully");
let mut columns = vec!["file_id CHAR(36) PRIMARY KEY".to_string()];
for format in formats {
let column = to_variant_name(&format).expect("unreachable");
columns.push(format!("{} BLOB", column))
}
for preset in presets {
let qry = format!(
"CREATE TABLE IF NOT EXISTS {table} ({columns})",
table = preset,
columns = columns.join(", ")
);
let query = sqlx::query(&qry);
query.execute(&self.pool).await?;
debug!("created preset table {}", preset);
}
info!("all preset tables created successfully");
Ok(())
}
}
#[async_trait]
impl ImageStore for SqliteBackend {
async fn get_image(
&self,
file_id: Uuid,
preset: String,
category: &str,
format: ImageFormat,
) -> Option<BytesMut> {
check_category!(file_id, category, "?", "?", &self.pool)?;
let column = to_variant_name(&format).expect("unreachable");
let qry = build_select_qry(column, &preset, "?");
let query = sqlx::query(&qry).bind(file_id.to_string());
extract_or_none!(query.fetch_optional(&self.pool).await, column)
}
async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
let mut total: i64 = 0;
for (preset, preset_data) in data {
sum_total!(total, preset_data);
let (qry, values) = build_insert!(&preset, preset_data, |_| "?".to_string());
let values_ = values.iter().map(|v| v.as_ref());
let query = query_with_parameters!(file_id.to_string(), &qry, values_);
query.execute(&self.pool).await?;
}
insert_metadata!(
file_id.to_string(),
category,
total,
|_| "?".to_string(),
&self.pool,
);
Ok(())
}
async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> {
delete_file!(file_id, &presets, "?", &self.pool);
Ok(())
}
async fn list_entities(
&self,
filter: FilterType,
order: OrderBy,
page: usize,
) -> Result<Vec<IndexResult>> {
// we start at 1 but the offset should be calculated from 0
let skip = PAGE_SIZE * (page as i64 - 1);
let order = match order {
OrderBy::CreationDate => "datetime(insert_date)",
OrderBy::TotalSize => "total_size",
};
// the WHERE clause (if any) must come before the ORDER BY / LIMIT clauses
// for the generated SQL to be valid; with `?` placeholders the filter value
// must also be bound before the offset and limit.
let mut qry = String::from(
"SELECT file_id, category, insert_date, total_size FROM image_metadata",
);
apply_filter!(qry, "?", &filter);
qry = format!("{} ORDER BY {} DESC LIMIT ?, ?;", qry, order);
let mut query = sqlx::query(&qry);
bind_filter!(query, filter);
let query = query.bind(skip).bind(PAGE_SIZE);
let mut rows = query.fetch_all(&self.pool).await?;
let results = from_rows!(rows);
Ok(results)
}
}

View File

@@ -1,58 +0,0 @@
use std::sync::Arc;
use bytes::BytesMut;
use concread::arcache::{ARCache, ARCacheBuilder};
use once_cell::sync::OnceCell;
use uuid::Uuid;
use crate::image::ImageFormat;
/// The tuple that acts as the hashed cache key.
pub type CacheKey = (Uuid, String, ImageFormat);
/// Cheaply cloneable lock around an LRU cache.
pub type CacheStore = Arc<ARCache<CacheKey, BytesMut>>;
pub static CACHE_STATE: OnceCell<CacheState> = OnceCell::new();
/// A wrapper around the `CacheStore` type letting it be put into Gotham's
/// shared state.
#[derive(Clone)]
pub struct CacheState(pub Option<CacheStore>);
impl CacheState {
/// Creates a new cache state instance with a given size.
pub fn init(cache_size: usize) {
let inst = if cache_size == 0 {
Self(None)
} else {
let store = Arc::new(ARCacheBuilder::new()
.set_size(cache_size, 12)
.build()
.unwrap()
);
Self(Some(store))
};
let _ = CACHE_STATE.set(inst);
}
/// Gets an item from the cache if it exists, otherwise returns `None`.
pub fn get(&self, file_id: Uuid, preset: String, format: ImageFormat) -> Option<BytesMut> {
let state = self.0.as_ref()?;
let ref_val = (file_id, preset, format);
let mut target = state.read();
target.get(&ref_val).map(|v| v.clone())
}
/// Adds an item to the cache; if the cache is already at its size limit
/// the least recently used (LRU) item is removed.
pub fn set(&self, file_id: Uuid, preset: String, format: ImageFormat, data: BytesMut) {
if let Some(state) = self.0.as_ref() {
let ref_val = (file_id, preset, format);
let mut target = state.write();
target.insert(ref_val, data);
target.commit();
}
}
}
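// An illustrative usage sketch mirroring `main.rs` and the route handlers
// (`file_id` and `fetched` are assumed to come from the caller):
//
//     CacheState::init(500);
//     let cache = CACHE_STATE.get().expect("not initialised");
//     if cache.get(file_id, "original".to_string(), ImageFormat::Png).is_none() {
//         cache.set(file_id, "original".to_string(), ImageFormat::Png, fetched);
//     }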

View File

@@ -1,147 +0,0 @@
use std::fs::read_to_string;
use std::sync::Arc;
use gotham_derive::StateData;
use hashbrown::HashMap;
use serde::{Deserialize, Serialize};
use crate::image::ImageFormat;
use crate::storage::DatabaseBackend;
/// The size of the pages when listing indexes via the admin panel.
pub const PAGE_SIZE: i64 = 50;
/// A cheaply cloneable version of the given configuration
/// for shared state middleware.
#[derive(Clone, StateData)]
pub struct StateConfig(pub Arc<Config>);
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum LogLevel {
Off,
Info,
Debug,
Error,
}
/// The dimensions of a given sizing preset.
/// Any uploaded images will be automatically duplicated and resized to this
/// preset.
#[derive(Deserialize)]
pub struct SizingPreset {
pub width: u32,
pub height: u32,
}
#[derive(Deserialize)]
pub struct Config {
pub log_level: LogLevel,
pub host: String,
pub port: u16,
pub base_data_path: String,
pub formats: HashMap<ImageFormat, bool>,
pub database_backend: DatabaseBackend,
pub size_presets: HashMap<String, SizingPreset>,
pub default_serving_preset: String,
pub default_serving_format: ImageFormat,
pub webp_quality: Option<f32>,
pub webp_compression: Option<f32>,
pub webp_method: Option<u8>,
pub webp_threading: Option<bool>,
pub cache_size: usize,
}
impl Config {
pub fn from_file(file: &str) -> anyhow::Result<Self> {
let data = read_to_string(file)?;
Ok(serde_json::from_str::<Self>(&data)?)
}
pub fn template(backend: &str) -> anyhow::Result<serde_json::Value> {
let config = match backend.to_lowercase().as_str() {
"redis" => json!({
"type": "redis",
"config": {
"connection_uri": "redis://user:pass@localhost/0",
"pool_size": 12,
}
}),
"cassandra" => json!({
"type": "cassandra",
"config": {
"clusters": [
"ip:port",
"ip:port",
"ip:port",
],
"keyspace": {
"strategy": "SimpleStrategy",
"spec": {
"replication_factor": 3
}
},
"user": "",
"password": "",
}
}),
"postgres" => json!({
"type": "postgres",
"config": {
"connection_uri": "postgres://user:pass@localhost/foo",
"pool_size": 10,
}
}),
"mysql" => json!({
"type": "mysql",
"config": {
"connection_uri": "mysql://user:pass@localhost/foo",
"pool_size": 10,
}
}),
"sqlite" => json!({
"type": "sqlite",
"config": {
"connection_uri": "sqlite://database.db",
"pool_size": 10,
}
}),
_ => return Err(anyhow::Error::msg("invalid database backend given")),
};
Ok(json!({
"log_level": LogLevel::Info,
"host": "127.0.0.1",
"port": 7070,
"base_data_path": "/images",
"formats": {
"png": true,
"jpeg": true,
"gif": false,
"webp": true,
},
"database_backend": config,
"size_presets": {
"small": {
"width": 32,
"height": 32,
},
"medium": {
"width": 64,
"height": 64,
},
"large": {
"width": 128,
"height": 128,
},
},
"default_serving_preset": "original",
"default_serving_format": "webp",
"webp_quality": None::<f32>,
"webp_compression": Some(50),
"webp_method": Some(4),
"webp_threading": Some(true),
"cache_size": 500,
}))
}
}

View File

@@ -1,58 +0,0 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
/// A set of filters that can be used to view
/// entities via the REST API on the admin panel.
///
/// Example:
///
/// ```json
/// {
/// "filter": {
/// "filter_type": "category",
/// "with_value": "cats",
/// }
/// }
/// ```
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase", tag = "filter_type", content = "with_value")]
pub enum FilterType {
All,
Category(String),
CreationDate(DateTime<Utc>),
}
/// How the data should be ordered when requesting the
/// index list.
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum OrderBy {
CreationDate,
TotalSize,
}
impl OrderBy {
pub fn as_str(&self) -> &str {
match self {
OrderBy::CreationDate => "insert_date",
OrderBy::TotalSize => "total_size",
}
}
}
/// A result when listing all items in the server.
#[derive(Serialize)]
pub struct IndexResult {
pub file_id: Uuid,
pub category: String,
pub total_size: i32,
pub created_on: DateTime<Utc>,
}
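/// The payload of the file listing endpoint.
///
/// An illustrative request body (every field is optional and defaults to
/// `All`, `CreationDate` and page 1 respectively):
///
/// ```json
/// {
///     "filter": {
///         "filter_type": "category",
///         "with_value": "cats"
///     },
///     "order": "creationdate",
///     "page": 2
/// }
/// ```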
#[derive(Deserialize)]
pub struct FilesListPayload {
pub filter: Option<FilterType>,
pub order: Option<OrderBy>,
pub page: Option<usize>,
}

View File

@@ -1,241 +0,0 @@
use std::sync::Arc;
use std::time::Instant;
use anyhow::Result;
use bytes::{BufMut, BytesMut};
use gotham::state::{FromState, State};
use gotham_derive::{StateData, StaticResponseExtender};
use hashbrown::HashMap;
use image::{imageops, load_from_memory_with_format, DynamicImage};
use log::{debug, error};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use webp::Encoder;
use crate::configure::StateConfig;
use crate::storage::StorageBackend;
use crate::traits::ImageStore;
pub type ImageData = HashMap<ImageFormat, BytesMut>;
pub type ImagePresetsData = HashMap<String, ImageData>;
pub type ImageDataSizes = HashMap<ImageFormat, usize>;
pub type ImagePresetDataSizes = HashMap<String, ImageDataSizes>;
#[derive(Debug, Clone, Ord, PartialOrd, Hash, Eq, PartialEq, Serialize, Deserialize, Copy)]
#[serde(rename_all = "lowercase")]
pub enum ImageFormat {
Png,
Jpeg,
Gif,
WebP,
}
#[derive(Deserialize, StateData, StaticResponseExtender)]
pub struct ImageGet {
pub format: Option<ImageFormat>,
pub encode: Option<bool>,
pub preset: Option<String>,
}
#[derive(Deserialize)]
pub struct ImageUpload {
pub format: ImageFormat,
pub data: String,
pub category: Option<String>,
}
#[derive(Serialize)]
pub struct ImageUploaded {
pub file_id: Uuid,
pub formats: ImagePresetDataSizes,
pub category: String,
}
#[derive(Deserialize, StateData, StaticResponseExtender)]
pub struct ImageRemove {
pub file_id: Uuid,
}
macro_rules! convert {
( $e:expr, $d:expr ) => {{
|| -> anyhow::Result<BytesMut> {
let buff = BytesMut::new();
let mut writer = buff.writer();
let start = Instant::now();
$e.write_to(&mut writer, $d)?;
debug!("format {:?} conversion took {:?}", $d, start.elapsed());
Ok(writer.into_inner())
}()
}};
}
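/// Converts the given image into every enabled format via `convert_image`,
/// inserting the resulting sizes into the first map and the encoded data
/// into the second under the given preset name.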
macro_rules! generate {
( $n:expr, $e:expr, $hm1:expr, $hm2:expr, $cfg:expr ) => ({
let (data, sizes) = convert_image($e, $cfg).await?;
$hm1.insert($n.to_string(), sizes);
$hm2.insert($n.to_string(), data);
})
}
macro_rules! is_enabled {
( $format:expr, $options:expr ) => {{
$options.get(&$format).map(|v| *v).unwrap_or(true)
}};
}
macro_rules! log_err {
( $result:expr, $msg:expr ) => {{
match &$result {
Ok(_) => (),
Err(e) => error!("{} {:?}", $msg, e),
};
$result
}};
}
fn spawn_conversion(
img: Arc<DynamicImage>,
format: ImageFormat,
convert_to_format: image::ImageFormat,
) -> Result<(ImageFormat, BytesMut)> {
let img: BytesMut = log_err!(
convert!(img, convert_to_format),
format!("failed to convert {:?}: ", convert_to_format)
)?;
Ok((format, img))
}
async fn convert_image(
img: Arc<DynamicImage>,
cfg: StateConfig,
) -> Result<(ImageData, ImageDataSizes)> {
let mut resulting_sizes = HashMap::with_capacity(4);
let mut resulting_data = HashMap::with_capacity(4);
let mut handles = vec![];
if is_enabled!(ImageFormat::Png, cfg.0.formats) {
let cloned = img.clone();
let handle = tokio::task::spawn_blocking(move || {
spawn_conversion(cloned, ImageFormat::Png, image::ImageFormat::Png)
});
handles.push(handle);
}
if is_enabled!(ImageFormat::Jpeg, cfg.0.formats) {
let cloned = img.clone();
let handle = tokio::task::spawn_blocking(move || {
spawn_conversion(cloned, ImageFormat::Jpeg, image::ImageFormat::Jpeg)
});
handles.push(handle);
}
if is_enabled!(ImageFormat::Gif, cfg.0.formats) {
let cloned = img.clone();
let handle = tokio::task::spawn_blocking(move || {
spawn_conversion(cloned, ImageFormat::Gif, image::ImageFormat::Gif)
});
handles.push(handle);
}
// This is the slowest conversion, maybe change??
// Updated: The new encoder allows for multi-threaded encoding.
if is_enabled!(ImageFormat::WebP, cfg.0.formats) {
let cloned = img.clone();
let handle = tokio::task::spawn_blocking(move || -> Result<(ImageFormat, BytesMut)> {
let start = Instant::now();
let raw = Encoder::from_image(cloned.as_ref()).encode();
debug!(
"format {:?} conversion took {:?}",
image::ImageFormat::WebP,
start.elapsed()
);
let webp = BytesMut::from(raw.as_ref());
Ok((ImageFormat::WebP, webp))
});
handles.push(handle);
}
for handle in handles {
let (format, data) = handle.await??;
resulting_sizes.insert(format, data.len());
resulting_data.insert(format, data);
}
Ok((resulting_data, resulting_sizes))
}
pub async fn process_new_image(
state: &mut State,
category: &str,
format: ImageFormat,
data: Vec<u8>,
) -> Result<(Uuid, ImagePresetDataSizes)> {
let cfg = StateConfig::take_from(state);
let storage = StorageBackend::take_from(state);
let fmt = match format {
ImageFormat::Png => image::ImageFormat::Png,
ImageFormat::Jpeg => image::ImageFormat::Jpeg,
ImageFormat::Gif => image::ImageFormat::Gif,
ImageFormat::WebP => image::ImageFormat::WebP,
};
let presets = &cfg.0.size_presets;
let mut converted_sizes = HashMap::with_capacity(presets.len());
let mut converted_data = HashMap::with_capacity(presets.len());
let original = Arc::from(log_err!(
load_from_memory_with_format(&data, fmt),
"failed to load format due to exception: "
)?);
generate!(
"original",
original.clone(),
converted_sizes,
converted_data,
cfg.clone()
);
for (preset_name, size) in presets {
let cloned = original.clone();
let im = Arc::new(cloned.resize(size.width, size.height, imageops::FilterType::Nearest));
generate!(
preset_name,
im,
converted_sizes,
converted_data,
cfg.clone()
);
}
let file_id = Uuid::new_v4();
storage.add_image(file_id, category, converted_data).await?;
Ok((file_id, converted_sizes))
}
pub async fn get_image(
state: &mut State,
file_id: Uuid,
preset: String,
category: &str,
format: ImageFormat,
) -> Option<BytesMut> {
let storage = StorageBackend::take_from(state);
storage.get_image(file_id, preset, category, format).await
}
pub async fn delete_image(state: &mut State, file_id: Uuid) -> Result<()> {
let storage = StorageBackend::take_from(state);
let cfg = StateConfig::take_from(state);
let presets = cfg.0.size_presets.keys().collect();
storage.remove_image(file_id, presets).await?;
Ok(())
}

View File

@@ -1,250 +0,0 @@
mod backends;
mod cache;
mod configure;
mod context;
mod image;
mod response;
mod routes;
mod storage;
mod traits;
#[macro_use]
extern crate serde_json;
use std::net::SocketAddr;
use std::sync::Arc;
use anyhow::Result;
use clap::{App, Arg, ArgMatches, SubCommand};
use gotham::middleware::logger::SimpleLogger as GothSimpleLogger;
use gotham::middleware::state::StateMiddleware;
use gotham::pipeline::new_pipeline;
use gotham::pipeline::single::single_pipeline;
use gotham::router::builder::{build_router, DefineSingleRoute, DrawRoutes};
use gotham::router::Router;
use gotham_derive::{StateData, StaticResponseExtender};
use log::{info, LevelFilter};
use serde::Deserialize;
use tokio::fs;
use uuid::Uuid;
use crate::configure::{LogLevel, StateConfig};
use crate::image::{ImageFormat, ImageGet, ImageRemove};
use crate::storage::{DatabaseBackend, StorageBackend};
use crate::traits::DatabaseLinker;
/// A regex string for validating uuids in the request path.
static UUID_REGEX: &str =
"[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$";
/// A regex for separating out the category sections
static CATEGORY_REGEX: &str = "[a-zA-Z0-9]+";
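// An example request the routes below match (the `/images` base path, the
// category and the UUID are illustrative):
//
//     GET /images/cats/550e8400-e29b-41d4-a716-446655440000?format=png&preset=medium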
/// A simple extractor for taking the file_id out of the path
/// of the request as a UUID.
#[derive(Deserialize, StateData, StaticResponseExtender)]
struct PathExtractor {
category: Option<String>,
file_id: Uuid,
}
/// Constructs all the routes for the server.
fn router(backend: storage::StorageBackend, config: StateConfig) -> Result<Router> {
let base = config.0.base_data_path.clone();
let cache_size = config.0.cache_size;
cache::CacheState::init(cache_size);
let pipeline = new_pipeline()
.add(GothSimpleLogger::new(log::Level::Info))
.add(StateMiddleware::new(backend))
.add(StateMiddleware::new(config))
.build();
let (chain, pipelines) = single_pipeline(pipeline);
Ok(build_router(chain, pipelines, |route| {
route
.get(&format!("{}/:file_id:{}", base, UUID_REGEX))
.with_path_extractor::<PathExtractor>()
.with_query_string_extractor::<ImageGet>()
.to_async(routes::get_file);
route
.get(&format!(
"{}/:category:{}/:file_id:{}",
base, CATEGORY_REGEX, UUID_REGEX
))
.with_path_extractor::<PathExtractor>()
.with_query_string_extractor::<ImageGet>()
.to_async(routes::get_file);
route.post("admin/create/image").to_async(routes::add_file);
route
.delete(&format!("admin/delete/image/:file_id:{}", UUID_REGEX))
.with_path_extractor::<ImageRemove>()
.to_async(routes::remove_file);
route.post("admin/list").to_async(routes::list_files);
}))
}
/// This will initialise the logger as well as
/// start the server and parse args (although not in that order).
#[tokio::main]
async fn main() -> Result<()> {
let cli_args = parse_args();
let (name, args) = cli_args.subcommand();
match name {
"init" => run_init(args.unwrap()).await,
"run" => run_server(args.unwrap()).await,
other => {
return Err(anyhow::Error::msg(format!(
"command {} is not supported, only commands (init, run) are supported",
other,
)))
},
}?;
Ok(())
}
async fn run_init(args: &ArgMatches<'_>) -> Result<()> {
let target_backend = args.value_of("backend").expect("backend value not given");
let example = configure::Config::template(target_backend)?;
let out = serde_json::to_string_pretty(&example)?;
fs::write("./config.json", out).await?;
Ok(())
}
async fn run_server(args: &ArgMatches<'_>) -> Result<()> {
let cfg = if let Some(cfg) = args.value_of("config") {
configure::Config::from_file(cfg)
} else {
return Err(anyhow::Error::msg(
"missing required config file, exiting...",
));
}?;
let (goth_lvl, lust_lvl) = match cfg.log_level {
LogLevel::Off => (LevelFilter::Off, LevelFilter::Off),
LogLevel::Info => (LevelFilter::Info, LevelFilter::Info),
LogLevel::Debug => (LevelFilter::Info, LevelFilter::Debug),
LogLevel::Error => (LevelFilter::Error, LevelFilter::Error),
};
if std::env::var_os("RUST_LOG").is_none() {
std::env::set_var("RUST_LOG", format!("warn,lust={},gotham={}", lust_lvl, goth_lvl));
}
pretty_env_logger::init();
let lossless = cfg.webp_quality.is_none();
let quality = if lossless {
cfg.webp_compression.unwrap_or(50f32)
} else {
cfg.webp_quality.unwrap()
};
let threading = cfg.webp_threading.unwrap_or(true);
let method = cfg.webp_method.unwrap_or(4) as i32;
info!(
"setting up webp state. \
Lossless: {}, \
Quality: {}, \
Method: {}, \
Threading: {}",
lossless, quality, method, threading
);
webp::init_global(lossless, quality, method, threading);
let fields: Vec<ImageFormat> = cfg
.formats
.iter()
.filter_map(
|(format, enabled)| {
if *enabled {
Some(*format)
} else {
None
}
},
)
.collect();
let mut presets: Vec<&str> = cfg.size_presets.keys().map(|v| v.as_str()).collect();
presets.push("original");
let backend: StorageBackend = match cfg.database_backend.clone() {
DatabaseBackend::Redis(db_cfg) => {
let mut db = backends::redis::Backend::connect(db_cfg).await?;
db.ensure_tables(presets, fields).await?;
let _ = storage::REDIS.set(db);
StorageBackend::Redis
},
DatabaseBackend::Cassandra(db_cfg) => {
let mut db = backends::cql::Backend::connect(db_cfg).await?;
db.ensure_tables(presets, fields).await?;
let _ = storage::CASSANDRA.set(db);
StorageBackend::Cassandra
},
DatabaseBackend::Postgres(db_cfg) => {
let mut db = backends::sql::PostgresBackend::connect(db_cfg).await?;
db.ensure_tables(presets, fields).await?;
let _ = storage::POSTGRES.set(db);
StorageBackend::Postgres
},
DatabaseBackend::MySQL(db_cfg) => {
let mut db = backends::sql::MySQLBackend::connect(db_cfg).await?;
db.ensure_tables(presets, fields).await?;
let _ = storage::MYSQL.set(db);
StorageBackend::MySQL
},
DatabaseBackend::Sqlite(db_cfg) => {
let mut db = backends::sql::SqliteBackend::connect(db_cfg).await?;
db.ensure_tables(presets, fields).await?;
let _ = storage::SQLITE.set(db);
StorageBackend::Sqlite
},
};
let addr: SocketAddr = format!("{}:{}", &cfg.host, cfg.port).parse()?;
let state_cfg = StateConfig(Arc::new(cfg));
let _ = gotham::init_server(addr, router(backend, state_cfg)?).await;
Ok(())
}
fn parse_args() -> ArgMatches<'static> {
App::new("Lust")
.version("0.1.0")
.author("Harrison Burt <hburt2003@gmail.com>")
.about("A powerful automatic image server.")
.subcommand(
SubCommand::with_name("init")
.about("Initialises the workspace with a configuration file")
.version("0.1.0")
.arg(
Arg::with_name("backend")
.short("b")
.long("backend")
.help("The target database backend")
.takes_value(true)
.required(true),
),
)
.subcommand(
SubCommand::with_name("run")
.about("Runs the server with the given configuration")
.version("0.1.0")
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.help("The path to a given config file in JSON format.")
.takes_value(true)
.default_value("config.json"),
),
)
.get_matches()
}

View File

@@ -1,54 +0,0 @@
use bytes::BytesMut;
use gotham::hyper::http::{header, Response, StatusCode};
use gotham::hyper::Body;
use headers::{ContentType, HeaderMapExt};
use serde_json::Value;
use crate::image::ImageFormat;
/// A standard JSON response with the content type set to application/json
pub fn json_response(status: StatusCode, data: Option<Value>) -> Response<Body> {
let payload = json!({
"status": status.as_u16(),
"data": data,
});
let mut resp = Response::builder()
.status(status)
.body(Body::from(serde_json::to_vec(&payload).unwrap()))
.unwrap();
resp.headers_mut().typed_insert(ContentType::json());
resp
}
pub fn image_response(format: ImageFormat, data: BytesMut) -> Response<Body> {
let mut resp = Response::builder()
.status(StatusCode::OK)
.body(Body::from(data.to_vec()))
.unwrap();
let content_type = match format {
ImageFormat::Png => "image/png",
ImageFormat::Jpeg => "image/jpeg",
ImageFormat::Gif => "image/gif",
ImageFormat::WebP => "image/webp",
};
resp.headers_mut()
.insert(header::CONTENT_TYPE, content_type.parse().unwrap());
resp
}
pub fn empty_response(status: StatusCode) -> Response<Body> {
let mut resp = Response::builder()
.status(status)
.body(Body::from(Vec::new()))
.unwrap();
resp.headers_mut().typed_insert(ContentType::text_utf8());
resp
}

View File

@@ -1,290 +0,0 @@
use base64::{decode, encode};
use gotham::handler::HandlerResult;
use gotham::hyper::http::StatusCode;
use gotham::hyper::{body, Body};
use gotham::state::{FromState, State};
use log::{debug, error};
use crate::cache::CACHE_STATE;
use crate::configure::StateConfig;
use crate::context::{FilesListPayload, FilterType, OrderBy};
use crate::image::{
delete_image,
get_image,
process_new_image,
ImageGet,
ImageRemove,
ImageUpload,
ImageUploaded,
};
use crate::response::{empty_response, image_response, json_response};
use crate::storage::StorageBackend;
use crate::traits::ImageStore;
use crate::PathExtractor;
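/// Reads the request body and deserializes it from JSON into the inferred
/// type, returning early from the handler with a 500 (body read failure)
/// or 422 (deserialization failure) JSON response on error.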
macro_rules! from_body {
( $e:expr ) => {{
let res = body::to_bytes(Body::take_from(&mut $e)).await;
let bod = match res {
Ok(bod) => bod,
Err(e) => {
error!("failed to read data from body {:?}", &e);
return Ok((
$e,
json_response(
StatusCode::INTERNAL_SERVER_ERROR,
Some(json!({
"message": format!("encountered exception: {:?}", e)
})),
),
));
}
};
match serde_json::from_slice(bod.as_ref()) {
Ok(v) => v,
Err(e) => {
return Ok((
$e,
json_response(
StatusCode::UNPROCESSABLE_ENTITY,
Some(json!({
"message":
format!(
"failed to deserialize POST body due to the following error: {:?}",
e
)
})),
),
))
}
}
}};
}
/// Gets a given image from the storage backend with the given
/// preset and format if it does not already exist in cache.
///
/// This endpoint can return any of the following status codes:
///
/// 404:
/// The image does not exist, NOTE: This endpoint will **always**
/// return a 404 if an unexpected error was encountered rather than
/// raising an error to the requester, instead it will be logged in
/// the console.
///
/// 200:
/// The image was successfully fetched and sent as the response.
///
/// TODO:
/// Likely performance issues could become apparent at higher
/// concurrency due to the Mutex on the LRU cache, although this
/// is probably insignificant compared to the time spent on IO.
pub async fn get_file(mut state: State) -> HandlerResult {
let path_vars = PathExtractor::take_from(&mut state);
let params = ImageGet::take_from(&mut state);
let config = StateConfig::take_from(&mut state);
let file_id = path_vars.file_id;
let category = path_vars.category.unwrap_or_else(|| "default".to_string());
let format = params
.format
.unwrap_or_else(|| config.0.default_serving_format.clone());
let mut preset = params
.preset
.unwrap_or_else(|| config.0.default_serving_preset.clone());
if preset != "original" {
// We don't necessarily want to error if you give an invalid
// preset, but we don't want to attempt something that doesn't
// exist.
if !config.0.size_presets.contains_key(&preset) {
preset = "original".into();
}
}
let cache = CACHE_STATE.get().expect("not initialised");
let img = if let Some(cached) = cache.get(file_id, preset.clone(), format) {
debug!(
"using cached version of image for file_id: {}, preset: {}, format: {:?}",
file_id, &preset, format,
);
Some(cached)
} else {
debug!(
"using backend version of image for file_id: {}, preset: {}, format: {:?}",
file_id, &preset, format,
);
if let Some(data) = get_image(&mut state, file_id, preset.clone(), &category, format).await
{
cache.set(file_id, preset, format, data.clone());
Some(data)
} else {
None
}
};
match img {
None => Ok((state, empty_response(StatusCode::NOT_FOUND))),
Some(data) => {
if params.encode.unwrap_or(false) {
let encoded = encode(data.as_ref());
return Ok((
state,
json_response(
StatusCode::OK,
Some(json!({
"image": encoded,
})),
),
));
}
Ok((state, image_response(format, data)))
},
}
}
/// Handles a POST request for adding an image to the store.
///
/// The image payload must be in JSON format and be base64 encoded in
/// the standard specification.
///
/// E.g.
/// ```json
/// {
/// "format": "png",
/// "data": "...data ensues..."
/// }
/// ```
pub async fn add_file(mut state: State) -> HandlerResult {
let upload: ImageUpload = from_body!(state);
let format = upload.format;
let data = match decode(upload.data) {
Ok(d) => d,
Err(_) => {
return Ok((
state,
json_response(
StatusCode::UNPROCESSABLE_ENTITY,
Some(json!({
"message": "data is not encoded in base64 format correctly",
})),
),
))
},
};
let category = upload.category.unwrap_or_else(|| "default".to_string());
let (file_id, formats) = match process_new_image(&mut state, &category, format, data).await {
Ok(v) => v,
Err(e) => {
return Ok((
state,
json_response(
StatusCode::INTERNAL_SERVER_ERROR,
Some(json!({
"message": format!("failed to process image: {:?}", e),
})),
),
));
},
};
let resp = ImageUploaded {
file_id,
formats,
category,
};
let resp = serde_json::to_value(resp).expect("failed to serialize uploaded stats");
Ok((state, json_response(StatusCode::OK, Some(resp))))
}
/// Handles removing an image from the store.
///
/// This removes the image from both the database backend and
/// the cache if it exists in there.
///
/// This only requires the UUID of the image; no other information
/// is needed.
///
/// Note on semantics:
/// This endpoint does not check if the image exists or not,
/// it simply tries to remove it if it exists otherwise ignores it.
///
/// For that reason this will always return 200 if no exceptions
/// happened at the time.
///
/// This endpoint can return any of the following responses:
///
/// 500:
/// The server could not complete the request due to an unexpected
/// exception, this is typically only possible via the transaction
/// on the database backend failing.
///
/// 200:
/// The image has been removed successfully.
pub async fn remove_file(mut state: State) -> HandlerResult {
let params = ImageRemove::take_from(&mut state);
if let Err(e) = delete_image(&mut state, params.file_id).await {
return Ok((
state,
json_response(
StatusCode::INTERNAL_SERVER_ERROR,
Some(json!({
"message": format!(
"failed to delete image with id: {} due to the following exception: {:?}",
params.file_id,
e
)
})),
),
));
};
Ok((
state,
json_response(
StatusCode::OK,
Some(json!({
"message": "file deleted if exists",
"file_id": params.file_id.to_string()
})),
),
))
}
pub async fn list_files(mut state: State) -> HandlerResult {
let payload: FilesListPayload = from_body!(state);
let storage = StorageBackend::take_from(&mut state);
let filter = payload.filter.unwrap_or(FilterType::All);
let sort = payload.order.unwrap_or(OrderBy::CreationDate);
let page = payload.page.unwrap_or(1usize);
let (status, payload) = match storage.list_entities(filter.clone(), sort, page).await {
Ok(results) => (
StatusCode::OK,
Some(json!({
"page": page,
"filtered_by": filter,
"ordered_by": sort,
"results": results,
})),
),
Err(e) => (
StatusCode::INTERNAL_SERVER_ERROR,
Some(json!({
"message": format!("failed to fetch results for page due to error: {:?}", e)
})),
),
};
Ok((state, json_response(status, payload)))
}

View File

@@ -1,137 +0,0 @@
use anyhow::Result;
use async_trait::async_trait;
use bytes::BytesMut;
use gotham_derive::StateData;
use log::error;
use once_cell::sync::OnceCell;
use serde::Deserialize;
use uuid::Uuid;
use crate::backends;
use crate::context::{FilterType, IndexResult, OrderBy};
use crate::image::{ImageFormat, ImagePresetsData};
use crate::traits::ImageStore;
// The below definitions are a hack, this is due to
pub(crate) static REDIS: OnceCell<backends::redis::Backend> = OnceCell::new();
pub(crate) static CASSANDRA: OnceCell<backends::cql::Backend> = OnceCell::new();
pub(crate) static POSTGRES: OnceCell<backends::sql::PostgresBackend> = OnceCell::new();
pub(crate) static MYSQL: OnceCell<backends::sql::MySQLBackend> = OnceCell::new();
pub(crate) static SQLITE: OnceCell<backends::sql::SqliteBackend> = OnceCell::new();
#[derive(Clone, Deserialize)]
#[serde(rename_all = "lowercase", tag = "type", content = "config")]
pub enum DatabaseBackend {
Redis(backends::redis::RedisConfig),
Cassandra(backends::cql::DatabaseConfig),
Postgres(backends::sql::DatabaseConfig),
MySQL(backends::sql::DatabaseConfig),
Sqlite(backends::sql::DatabaseConfig),
}
macro_rules! acquire {
( $e:expr ) => {{
$e.get().expect("backend not initialised")
}};
}
#[derive(Copy, Clone, StateData)]
pub enum StorageBackend {
Redis,
Cassandra,
Postgres,
MySQL,
Sqlite,
}
#[async_trait]
impl ImageStore for StorageBackend {
async fn get_image(
&self,
file_id: Uuid,
preset: String,
category: &str,
format: ImageFormat,
) -> Option<BytesMut> {
match self {
Self::Redis => {
acquire!(REDIS)
.get_image(file_id, preset, category, format)
.await
},
Self::Cassandra => {
acquire!(CASSANDRA)
.get_image(file_id, preset, category, format)
.await
},
Self::Postgres => {
acquire!(POSTGRES)
.get_image(file_id, preset, category, format)
.await
},
Self::MySQL => {
acquire!(MYSQL)
.get_image(file_id, preset, category, format)
.await
},
Self::Sqlite => {
acquire!(SQLITE)
.get_image(file_id, preset, category, format)
.await
},
}
}
async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
let res = match self {
Self::Redis => acquire!(REDIS).add_image(file_id, category, data).await,
Self::Cassandra => acquire!(CASSANDRA).add_image(file_id, category, data).await,
Self::Postgres => acquire!(POSTGRES).add_image(file_id, category, data).await,
Self::MySQL => acquire!(MYSQL).add_image(file_id, category, data).await,
Self::Sqlite => acquire!(SQLITE).add_image(file_id, category, data).await,
};
if let Err(e) = &res {
error!("failed to add image {:?}", e);
}
res
}
async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> {
let res = match self {
Self::Redis => acquire!(REDIS).remove_image(file_id, presets).await,
Self::Cassandra => acquire!(CASSANDRA).remove_image(file_id, presets).await,
Self::Postgres => acquire!(POSTGRES).remove_image(file_id, presets).await,
Self::MySQL => acquire!(MYSQL).remove_image(file_id, presets).await,
Self::Sqlite => acquire!(SQLITE).remove_image(file_id, presets).await,
};
if let Err(e) = &res {
error!("failed to remove image {:?}", e);
}
res
}
async fn list_entities(
&self,
filter: FilterType,
order: OrderBy,
page: usize,
) -> Result<Vec<IndexResult>> {
let res = match self {
Self::Redis => acquire!(REDIS).list_entities(filter, order, page).await,
Self::Cassandra => acquire!(CASSANDRA).list_entities(filter, order, page).await,
Self::Postgres => acquire!(POSTGRES).list_entities(filter, order, page).await,
Self::MySQL => acquire!(MYSQL).list_entities(filter, order, page).await,
Self::Sqlite => acquire!(SQLITE).list_entities(filter, order, page).await,
};
if let Err(e) = &res {
error!("failed to list images {:?}", e);
}
res
}
}

View File

@ -1,34 +0,0 @@
use anyhow::Result;
use async_trait::async_trait;
use bytes::BytesMut;
use uuid::Uuid;

use crate::context::{FilterType, IndexResult, OrderBy};
use crate::image::{ImageFormat, ImagePresetsData};
#[async_trait]
pub trait DatabaseLinker {
async fn ensure_tables(&mut self, presets: Vec<&str>, columns: Vec<ImageFormat>) -> Result<()>;
}
#[async_trait]
pub trait ImageStore {
async fn get_image(
&self,
file_id: Uuid,
preset: String,
category: &str,
format: ImageFormat,
) -> Option<BytesMut>;
async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()>;
async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()>;
async fn list_entities(
&self,
filter: FilterType,
order: OrderBy,
page: usize,
) -> Result<Vec<IndexResult>>;
}

View File

@ -1,11 +0,0 @@
version: '3'
services:
keydb:
image: eqalpha/keydb:latest
container_name: some-keydb
ports:
- "6379:6379"
volumes:
- ./storage:/data
- ./keydb.conf:/etc/keydb/keydb.conf

File diff suppressed because it is too large

View File

@ -1,13 +0,0 @@
version: '3'
services:
mariadb:
image: mariadb:latest
container_name: some-maria
ports:
- "3306:3306"
environment:
- MARIADB_ROOT_PASSWORD=admin
- MARIADB_DATABASE=maria
volumes:
- ./storage:/var/lib/mysql

View File

@ -1,24 +0,0 @@
version: '3'
services:
some-scylla:
image: scylladb/scylla
container_name: some-scylla
volumes:
- ./storage/n1:/var/lib/scylla
ports:
- "9042:9042"
some-scylla2:
image: scylladb/scylla
container_name: some-scylla2
command: --seeds=some-scylla
volumes:
- ./storage/n2:/var/lib/scylla
some-scylla3:
image: scylladb/scylla
container_name: some-scylla3
command: --seeds=some-scylla
volumes:
- ./storage/n3:/var/lib/scylla

View File

@ -1,13 +0,0 @@
FROM nginx:latest
# Remove the default Nginx configuration file
RUN rm -v /etc/nginx/nginx.conf
# Copy a configuration file from the current directory
ADD nginx.conf /etc/nginx/
ADD sample /usr/share/nginx/html/
ADD sample /var/www/html/
# Expose ports
EXPOSE 90

View File

@ -1,61 +0,0 @@
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 1024;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
server {
root /usr/share/nginx/html/;
autoindex on;
listen 90;
}
}

Binary file not shown. (image removed, 89 KiB)

View File

@ -1,40 +0,0 @@
import asyncio
import base64

import aiohttp

queue = asyncio.Queue()

def get_base_data(path: str) -> str:
    with open(path, "rb") as file:
        data = file.read()
    return base64.standard_b64encode(data).decode("utf-8")

async def task():
    # Each worker re-uses one session and drains the queue,
    # uploading the same sample image over and over.
    data = get_base_data("./samples/news.png")
    async with aiohttp.ClientSession() as sess:
        while not queue.empty():
            _ = await queue.get()
            async with sess.post(
                "http://127.0.0.1:7070/admin/create/image",
                json={"format": "png", "data": data}
            ) as resp:
                assert resp.status == 200
            await asyncio.sleep(0.2)

async def main():
    # One queue entry per upload to perform.
    for _ in range(200_000):
        queue.put_nowait(None)

    tasks = [task() for _ in range(1)]
    t = asyncio.ensure_future(asyncio.gather(*tasks))
    while not queue.empty() and not t.done():
        print(f"currently, {queue.qsize()} in queue")
        await asyncio.sleep(1)

    # Wait for the workers to finish before returning, otherwise
    # asyncio.run() would cancel them while uploads are in flight.
    await t

if __name__ == '__main__':
    asyncio.run(main())

Binary file not shown. (image removed, 89 KiB)

Binary file not shown. (image removed, 149 KiB)

View File

@ -1,109 +0,0 @@
import base64
import uuid

import requests

working_ids = {}

def get_base_data(path: str) -> str:
    with open(path, "rb") as file:
        data = file.read()
    print(f"original {len(data)}")
    return base64.standard_b64encode(data).decode("utf-8")

def test_png_upload1():
    data = get_base_data("./samples/sunset.jpeg")
    payload = {
        "format": "jpeg",
        "data": data,
    }
    r = requests.post("http://127.0.0.1:7070/admin/create/image", json=payload)
    data = r.json()
    assert r.status_code == 200
    assert data['data']['category'] == "default"
    file_id = data['data']['file_id']
    working_ids['default'] = file_id
    print(file_id)

def test_get_img_default():
    r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}")
    assert r.status_code == 200

def test_get_img_preset_webp():
    r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=webp")
    assert r.status_code == 200

def test_get_img_preset_png():
    r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=png")
    assert r.status_code == 200

def test_get_img_preset_jpeg():
    r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=jpeg")
    assert r.status_code == 200

def test_get_img_format_gif():
    r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=gif")
    assert r.status_code == 200

def test_get_img_preset_large():
    r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?preset=large")
    assert r.status_code == 200

def test_get_img_preset_medium():
    r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?preset=medium")
    assert r.status_code == 200

def test_get_img_preset_small():
    r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?preset=small")
    assert r.status_code == 200

def test_get_nothing1():
    r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}")
    assert r.status_code == 404

def test_get_nothing2():
    r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=png")
    assert r.status_code == 404

def test_get_nothing3():
    r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=jpeg")
    assert r.status_code == 404

def test_get_nothing4():
    r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=webp")
    assert r.status_code == 404

def test_get_nothing5():
    r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=gif")
    assert r.status_code == 404

def test_remove_img1():
    r = requests.delete(
        "http://127.0.0.1:7070/admin/delete/image/44524a33-c505-476d-b23b-c42de1fd796a")
    print(r.content)
    assert r.status_code == 200

if __name__ == '__main__':
    test_png_upload1()
    test_get_img_default()
    test_get_nothing1()
    # test_remove_img1()