@@ -16,7 +16,9 @@ jobs:
     steps:
     - uses: actions/checkout@v2
+    - name: Setup Test Framework
+      run: cargo install cargo-nextest
     - name: Build
-      run: cargo build --verbose
+      run: cargo build
     - name: Run tests
-      run: cargo test --verbose
+      run: cargo nextest run
.gitignore
@@ -1,9 +1,3 @@
-/venv/
-/config.json
-/**/target
-/**/Cargo.lock
-/database.db
-/database.db-shm
-/database.db-wal
-/tests/backend_runners/**/storage/
-/tests/noqa
+/data
+/target
+/venv
Cargo.toml
@@ -1,8 +1,8 @@
 [package]
 name = "lust"
-version = "1.3.5"
-authors = ["Harrison Burt <57491488+ChillFish8@users.noreply.github.com>"]
-edition = "2018"
+version = "2.0.0"
+authors = ["Harrison Burt <hburt2003@gmail.com>"]
+edition = "2021"
 documentation = "getting-started.md"
 readme = "README.md"
 license = "MIT"
@@ -14,36 +14,44 @@ description = "A fast, auto-optimising image server designed for multiple backends with throughput and latency in mind."
 
 [dependencies]
 webp = { version = "*", path = "./webp" }
-image = "0.23"
-
-base64 = "0.13.0"
-bytes = "1"
-
-anyhow = "1"
-clap = "2"
-serde_json = "1"
-serde_variant = "0.1.0"
-async-trait = "0.1.50"
-once_cell = "1.7.2"
-concread = "0.2.14"
-futures = "0.3"
-
-log = "0.4.14"
-pretty_env_logger = "0.4.0"
-
-gotham = "0.6.0"
-gotham_derive = "0.6.0"
-headers = "0.3"
-
 tokio = { version = "1", features = ["full"] }
+poem-openapi = { version = "1.3", features = ["redoc", "uuid", "url"] }
+poem = { version = "1.2", features = ["anyhow"] }
 serde = { version = "1", features = ["derive"] }
 chrono = { version = "0.4", features = ["serde"] }
 uuid = { version = "0.8.2", features = ["serde", "v4"] }
-hashbrown = { version = "0.11.2", features = ["serde"] }
-sqlx = { version = "0.5", features = [ "runtime-tokio-rustls", "mysql", "sqlite", "postgres", "chrono", "uuid" ] }
-redis = { version = "0.20", features = ["tokio-comp", "connection-manager"] }
-scylla = "0.2.1"
 mimalloc = { version = "*", default-features = false }
+clap = { version = "3", features = ["derive", "env"] }
+strum = { version = "0.24", features = ["derive"] }
 
-[profile.release]
-lto = "fat"
-codegen-units = 1
+# Blob storage deps
+rusoto_core = "0.47.0"
+rusoto_s3 = "0.47.0"
+scylla = "0.4.3"
+
+moka = "0.8.0"
+rayon = "1.5.1"
+crc32fast = "1.3.2"
+enum_dispatch = "0.3.8"
+hashbrown = "0.12.0"
+crossbeam = "0.8.1"
+tracing = "0.1.30"
+tracing-futures = "0.2.5"
+tracing-subscriber = "0.3.8"
+image = "0.24"
+base64 = "0.13.0"
+bytes = "1"
+anyhow = "1"
+serde_yaml = "0.8.23"
+serde_json = "1"
+async-trait = "0.1"
+once_cell = "1.10.0"
+futures = "0.3"
+mime = "0.3.16"
+
+[dev-dependencies]
+poem = { version = "1.2", features = ["anyhow", "test"] }
+
+# [profile.release]
+# lto = "fat"
+# codegen-units = 1
Dockerfile
@@ -1,8 +1,8 @@
 FROM rust:slim-buster as build
 
-WORKDIR /code
+WORKDIR /app
 
-COPY . /code
+COPY . /app
 
 RUN cargo build --release
 
@@ -10,7 +10,7 @@ RUN cargo build --release
 FROM debian:buster-slim
 
 WORKDIR /etc/lust
-COPY --from=build /code/target/release/lust /
+COPY --from=build /app/target/release/lust /
 USER root
 
-ENTRYPOINT ["./lust", "run"]
+ENTRYPOINT ["./lust", "--host", "0.0.0.0"]
README.md
@@ -1,39 +1,46 @@
 <p align="center">
-  <img width="60%" src="https://github.com/ChillFish8/lust/blob/master/assets/logo.png" alt="Lust Logo">
+  <img width="50%" src="https://user-images.githubusercontent.com/57491488/160932579-518e61b8-6a3d-4400-a46c-1cb93d461417.png" alt="Lust Logo">
 </p>
 
 #
 <p align="center">
   <h2 align="center">🔥 Build your own image CDN system your way with lust.</h2>
 </p>
 
 ## What is Lust?
-Lust is a static image server designed to automatically convert uploaded images to several formats and preset sizes with scaling in mind.
-
-Lust stores images via any of the given database backends:
-
-- Redis / KeyDB
-- Cassandra / ScyllaDB
-- PostgreSQL
-- MySQL / MariaDB
-- Sqlite (file / temp file only)
+Lust is an **auto-optimising image server**, designed for **high throughput** and **low latency** handling of images, *now that is lustful*.
+Re-encode uploaded images into `png`, `jpeg`, `webp` or even into `gif` based formats!
+
+Resize them to your liking automatically with sizing presets; instantly create small,
+medium and large variants with just a few lines in a config file. *Now that's the spirit of lust.*
+
+And much more, like caching, on-the-fly resizing, and processing modes, to name a few.
 
 ## Getting started
 
+### Creating a config file
+It's highly advised to take a look at some [example config files](/examples/configs) to get an idea
+of what a general config file would look like.
+
+Full documentation in markdown form can also be found [here](description.md); this is also
+served directly by the server as part of the documentation UI endpoint.
+
 ### Installation
 #### Building from Source
 To build from source, just clone this repo via `git clone https://github.com/chillfish8/lust.git` and then run `cargo build --release`.
-#### Docker Images
-Lust has a set of pre-built, optimised docker images ready to go, they just require having a config.json attached to them and away you go.
-
-Example Dockerfile:
-```docker
-FROM chillfish8/lust:latest
-
-ADD ./config.json /etc/lust/config.json
-```
+#### Installing via Cargo
+You can install lust directly via cargo and the git flag:
+```shell
+cargo install lust --git https://github.com/ChillFish8/lust.git
+```
 
-You can run the image via `docker run`, you may wish to expose your set ports etc...
+#### Docker Images
+Lust has a set of pre-built, optimised docker images ready to go. Just run it with
+```shell
+docker run -v "my_config.yaml:/var/lust/my_config.yaml" chillfish8/lust:latest --config-file "/var/lust/my_config.yaml"
+```
 
 ### After Installation
-See the [getting started page](https://github.com/ChillFish8/lust/blob/master/getting-started.md) for more information after installation.
+Once you're up and running, navigate to `http://127.0.0.1:8000/ui`, or `/ui` on whatever port your server is running on,
+to see the full OpenAPI docs.
 
 ## Caching
 Lust makes use of a Least Recently Used in-memory cache which can be adjusted for your needs via the `cache_size` key in the configuration file.
@@ -41,19 +48,12 @@ The larger the number the more images it will cache at once and vice versa.
 *NOTE: With bigger images this can create much higher RAM usage*
 
 ## Scaling
-Lust's ability to scale is purely down to the backend you use, something like SQLite will obviously suffer
-at any sort of scale and is meant only really for development purposes.
-Personally I recommend PostgreSQL (leading to vertical scaling storage) or Scylla (horizontally scaling storage) depending on your needs.
-If you want a very small amount of cached images then Postgres will out perform Scylla considerably at random reads however,
-Scylla is far more suited to large scaling and distributed systems as well as large amounts of writes.
+Lust's ability to scale is purely down to the backend you use, so it is worth noting that
+the file system backend is only really designed for testing. For full-scale deployment,
+consider using Scylla or an S3-compatible blob store to serve data from.
 
-Performance of each database generally doesn't matter too much due to the processing time of each image
-being more than the IO latency when adding images and the cache supporting reads, that being said if
-you have a lot of random inconsistent reads PostgreSQL will likely be the best, or
-if you want large distributed scaling Scylla will allow you to scale horizontally.
-
-If you want the best of both worlds I would recommend looking at KeyDB (Redis) with disk persistence, when setup correctly this
-can be an incredibly powerful setup.
+If your goal is high-end performance, ScyllaDB will be the most performant by a large
+margin, but this will come with a higher operating cost.
 
 ## Formats
 Lust supports any of the following formats:
@@ -64,60 +64,29 @@ Lust supports any of the following formats:
 
 Any uploaded images will be given a unique uuid and be re-encoded into all the other enabled formats in all presets.
 This is especially useful when you want to serve several variants of the same image with different formats.
 
+You can also adjust this based on the processing mode: `aot`/*ahead-of-time* encoding will follow the old
+lust behaviour by encoding and resizing each image at upload time.
+
+`jit`/*just-in-time* encoding will only resize and re-encode at request time, storing a base copy
+of the file to generate new images. This can save a considerable amount of CPU time and disk space
+depending on your requirements.
+
+Finally, we have the `realtime` encoder. This will only store an original copy, like the `jit` encoder,
+but will never save the resized and encoded image. This also enables the ability to
+do on-the-fly resizing, and is recommended for situations where you're not expecting to serve images
+to the public network.
+
 ## Presets
-The server can take several sizing presets which can be targeted via the `size` query parameter when getting an image. These presets will mean every image at upload time will be resized to fit the width and height bounds using the nearest approximation.
+The server can take several sizing presets which can be targeted via the `size`
+query parameter when getting an image.
+These presets will mean every image at upload time will be resized to
+fit the width and height bounds using the configured resizing filter
+(defaults to nearest neighbour).
 
 Regardless of presets, an `original` image is always stored and can be accessed via the `size=original` query.
-The default preset when served without a `sized` parameter can be set in the configuration file via the `default_serving_preset` key.
-
-## Webp Optimisation
-Lust supports automatic webp encoding; by default it encodes with lossless compression, but this can be changed via the `webp_quality` key in the configuration file
-and should be a float from `0.0` to `100.0` with the quality of the image changing respectively.
-
-## Base64 Support
-
-Lust will serve given images / gifs as Base64 data via the `encode` query parameter (`true`/`false`); this will return
-a JSON response unlike the traditional raw response.
+The default preset when served without a `size` parameter can be set in the configuration file via the `default_serving_preset` key.
 
 ## Data Efficiency
-Lust's data storage efficiency is roughly the same as storing on a plain file system outside of any system the database backend employs when storing the data.
-
-For example, let's upload an image:
-<p align="left">
-  <img width="50%" src="https://github.com/ChillFish8/lust/blob/master/assets/news.png" alt="Medium image">
-</p>
-
-This image is about 91KB in size as a single image. If we upload this and convert to the 3 base image formats with some presets:
-
-```json5
-{
-  'data': {
-    'file_id': 'ccbe2207-8629-4938-9da9-3f75706f9b4e',
-    'formats': {
-      'large': {   // Resized to 128px x 128px
-        'jpeg': 3460,
-        'png': 5292,
-        'webp': 3006
-      },
-      'medium': {  // Resized to 64px x 64px
-        'jpeg': 1543,
-        'png': 1738,
-        'webp': 1022
-      },
-      'original': {
-        'jpeg': 42846,
-        'png': 103672,
-        'webp': 53982
-      },
-      'small': {   // Resized to 32px x 32px
-        'jpeg': 912,
-        'png': 629,
-        'webp': 354
-      }
-    }
-  },
-  'status': 200
-}
-```
-
-We can see the `original` size totals around 200KB, which is fairly reasonable with zero-compression PNG encoding and lossless webp formats.
+Lust's data storage efficiency is roughly the same as storing on a plain file system, outside any
+system the database backend employs when storing the data.
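The storage claim in the removed "Data Efficiency" section is easy to sanity-check from the byte counts quoted in the old example response; a quick sketch, using only numbers taken from the text above:

```python
# Sum the per-format byte counts from the removed README example.
sizes = {
    "large": {"jpeg": 3460, "png": 5292, "webp": 3006},
    "medium": {"jpeg": 1543, "png": 1738, "webp": 1022},
    "original": {"jpeg": 42846, "png": 103672, "webp": 53982},
    "small": {"jpeg": 912, "png": 629, "webp": 354},
}
for preset, formats in sizes.items():
    total_kib = sum(formats.values()) / 1024
    print(f"{preset}: {total_kib:.1f} KiB")
# `original` alone comes to ~195.8 KiB, matching the "around 200KB" claim.
```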
BIN assets/logo.png (binary change: 88 KiB before, 34 KiB after)
BIN assets/news.png (deleted, was 89 KiB)
@@ -0,0 +1,178 @@
+# The Lust 2 documentation
+
+Welcome to the Lust 2 API documentation!
+
+This section contains the configuration documentation for running and building your system.
+
+## CLI
+```shell
+lust 2.0.0
+Harrison Burt <hburt2003@gmail.com>
+A fast, auto-optimising image server designed for multiple backends with throughput and latency in
+mind.
+
+USAGE:
+    lust.exe [OPTIONS] --config-file <CONFIG_FILE>
+
+OPTIONS:
+        --config-file <CONFIG_FILE>
+            The file path to a given config file.
+
+            This can be either a JSON formatted config or YAML.
+
+            [env: CONFIG_FILE=]
+
+    -d, --docs-url <DOCS_URL>
+            The external URL that would be used to access the server if applicable.
+
+            This only affects the documentation.
+
+            [env: DOCS_URL=]
+
+    -h, --host <HOST>
+            The binding host address of the server
+
+            [env: HOST=]
+            [default: 127.0.0.1]
+
+        --help
+            Print help information
+
+        --log-level <LOG_LEVEL>
+            [env: LOG_LEVEL=]
+            [default: info]
+
+    -p, --port <PORT>
+            [env: PORT=]
+            [default: 8000]
+
+    -V, --version
+            Print version information
+```
+
+## Config File
+This is a demo config file outlining and explaining each configuration key.
+
+*Note: This is in the YAML format, but an equivalent in JSON is also supported.*
+
+```yaml
+global_cache:
+  # We cache up to 1GB's worth of the most recently used images.
+  # Like the bucket cache, a max_images limit can also be applied,
+  # but not used in tandem with the max_capacity limit.
+  # If this is `null`/unset then no caching is performed.
+  max_capacity: 1024
+
+# The *global* max upload size allowed in KB.
+#
+# This takes precedence over bucket level limits.
+max_upload_size: 4096  # 4MB
+
+# The global max concurrency.
+#
+# This takes precedence over bucket level limits.
+max_concurrency: 500
+
+# A custom base path to serve images out of.
+# This gets appended to the `v1` route and must start with a `/`.
+base_serving_path: "/images"
+
+backend:
+  filesystem:  # Can be any of 'scylla', 'filesystem' or 'blobstorage'.
+
+    # Attributes are specific to the selected backend.
+    # For the filesystem backend only the `directory` argument is required
+    # and is the base directory for images to be stored in.
+    directory: "/data"
+
+    # scylla attributes
+    #
+    # nodes:  # A list of known nodes.
+    #   - "127.0.0.1:9042"
+    # keyspace: lust  # The keyspace must be created ahead of time.
+    # username: 'my-user'  # Optional
+    # password: 'my-pass'  # Optional
+    # table: 'images'  # Optional, defaults to `lust_images`.
+
+    # blobstore attributes
+    #
+    # This also requires `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID`
+    # environment variables for auth.
+    # name: "my-bucket"
+    # region: "my-s3-region"
+    # endpoint: "https://s3.eu2.my-endpoint.com"
+    # store_publc: false  # If true, images are uploaded with acl: `public-read`.
+
+buckets:
+  my-profile-pictures:
+    mode: jit  # 'jit', 'aot' or 'realtime' are allowed.
+
+    formats:
+      png: true    # Enable PNG encoding.
+      jpeg: true   # Enable JPEG encoding.
+      webp: true   # Enable WebP encoding.
+      gif: false   # Disable GIF encoding.
+
+      # The format to store the original image in.
+      # This will be used by the 'jit' and 'realtime' encoders
+      # when an image is requested as a base.
+      # This probably does not want to be a lossy format.
+      original_image_store_format: jpeg
+
+      webp_config:
+        # This parameter is the amount of effort put into the
+        # compression: 0 is the fastest but gives larger
+        # files compared to the slowest, but best, 100.
+        #
+        # If set to `null` this will enable lossless encoding.
+        quality: 80  # Set lossy quality to 80% (0.0 - 100.0).
+
+        # The quality / speed trade-off (0=fast, 6=slower-better).
+        method: 4
+
+        # With lossless encoding this is the ratio of compression to speed.
+        # If using lossy encoding this does nothing.
+        # float: 0.0 (worse) - 100.0 inclusive (better but slower).
+        # compression: 60
+
+        threading: true  # Enable multithreaded encoding.
+
+    # The encoding format to serve the image as if not explicitly specified.
+    # Defaults to the first enabled encoding format if not set.
+    default_serving_format: jpeg
+
+    # The default resizing preset to serve images as.
+    # If this is not set, the original file sizing is used.
+    default_serving_preset: null
+
+    presets:
+      # Makes a preset named 'small' which can be accessed when
+      # requesting an image with the `size=small` query parameter.
+      small:
+        width: 96   # 96px
+        height: 96  # 96px
+
+        # The resizing filter to use, in order of performance vs quality:
+        # 'nearest', 'triangle', 'catmullrom',
+        # 'gaussian' and 'lanczos3' are supported.
+        filter: triangle
+
+    # The in-memory cache config.
+    # If left unset the system will attempt to use the global
+    # cache if enabled, otherwise no caching will be applied.
+    cache:
+      # We cache up to the top 100 most recently used images.
+      max_images: 100
+
+      # We can also use max_capacity (but not with max_images as well).
+      # This will cache by the memory usage limit vs the amount of images.
+      # max_capacity: 500  # 500MB limit
+
+    # The *bucket local* max upload size allowed for this bucket in KB.
+    # No 'realistic' limit is applied if left unset.
+    max_upload_size: 2049  # 2MB
+
+    # The *bucket local* max concurrent operations.
+    # No limit is applied if left unset.
+    max_concurrency: 200
+```
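For local testing, the CLI options and environment variables above can be exercised with a small launcher script. This is a sketch only: it assumes a built `lust` binary on `PATH` and that the OpenAPI docs are served at `/ui` as the README states; none of it is prescribed by the diff itself.

```python
import os
import subprocess
import time

import requests

# Options can come from flags or env vars, per the CLI help above.
env = {**os.environ, "HOST": "127.0.0.1", "PORT": "8000", "LOG_LEVEL": "info"}
proc = subprocess.Popen(["lust", "--config-file", "config.yaml"], env=env)
try:
    time.sleep(2)  # crude wait for the server to bind
    r = requests.get("http://127.0.0.1:8000/ui")
    print("docs endpoint status:", r.status_code)
finally:
    proc.terminate()
```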
@@ -0,0 +1,33 @@
+backend:
+  filesystem:  # Use the filesystem backend.
+    directory: "data"
+
+base_serving_path: "/images"  # Serve buckets out of `/images`.
+global_cache:
+  max_images: 1000  # At most cache 1000 images.
+  # max_capacity: 500  # Or we can set a 500MB max capacity.
+
+buckets:
+  user-profiles:  # Define a bucket called "user-profiles"; this is accessible out of `/images/user-profiles`.
+    mode: jit  # Optimise images as and when they're required, then store them.
+    formats:
+      png: false   # Disable PNG encoding.
+      jpeg: true   # Enable JPEG encoding.
+      webp: true   # Enable WebP encoding.
+      gif: false   # Disable GIF encoding.
+
+      webp_config:
+        quality: 80      # Set lossy quality to 80%.
+        method: 4        # Opt on the side of performance slightly more than quality.
+        threading: true  # Enable multi-threaded encoding.
+
+    default_serving_format: webp           # Serve the WebP format by default.
+    default_serving_preset: medium-square  # Use the "medium-square" sizing preset by default.
+
+    presets:
+      medium-square:  # Define a new resizing preset.
+        width: 500   # 500px
+        height: 500  # 500px
+
+    cache: null  # Use the global cache handler.
@@ -0,0 +1,47 @@
+backend:
+  scylla:
+    nodes:
+      - "127.0.0.1:9042"
+    keyspace: lust  # The keyspace must be created ahead of time.
+
+base_serving_path: "/images"  # Serve buckets out of `/images`.
+global_cache:
+  max_capacity: 500  # We set a 500MB max cache capacity.
+
+buckets:
+  user-profiles:  # Define a bucket called "user-profiles"; this is accessible out of `/images/user-profiles`.
+    # mode: aot  # Optimise images ahead of time, at upload, and store them.
+    # formats:
+    #   png: true    # Enable PNG encoding.
+    #   jpeg: true   # Enable JPEG encoding.
+    #   webp: false  # Disable WebP encoding.
+    #   gif: false   # Disable GIF encoding.
+    #
+    # default_serving_format: jpeg  # Serve the JPEG format by default.
+    #
+    # cache: null  # Use the global cache handler.
+
+    mode: aot  # Optimise images ahead of time, at upload, and store every variant.
+    formats:
+      png: true    # Enable PNG encoding.
+      jpeg: true   # Enable JPEG encoding.
+      webp: true   # Enable WebP encoding.
+      gif: false   # Disable GIF encoding.
+
+      original_image_store_format: jpeg
+
+      webp_config:
+        quality: 80      # Set lossy quality to 80%.
+        method: 4        # Opt on the side of performance slightly more than quality.
+        threading: true  # Enable multi-threaded encoding.
+
+    default_serving_format: png            # Serve the PNG format by default.
+    default_serving_preset: medium-square  # Use the "medium-square" sizing preset by default.
+
+    presets:
+      medium-square:  # Define a new resizing preset.
+        width: 500   # 500px
+        height: 500  # 500px
+
+    cache: null  # Use the global cache handler.
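Note the comment above: for the scylla backend the `lust` keyspace must exist before the server starts. A one-off creation sketch follows; it assumes the third-party `cassandra-driver` package (any CQL shell such as `cqlsh` works just as well), and uses a single-node `SimpleStrategy` replication mirroring the shape used by the removed `cql.rs` backend further down.

```python
from cassandra.cluster import Cluster  # pip install cassandra-driver

# Connect to the node listed under `nodes` in the config above.
cluster = Cluster(["127.0.0.1"], port=9042)
session = cluster.connect()
session.execute(
    "CREATE KEYSPACE IF NOT EXISTS lust "
    "WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1};"
)
cluster.shutdown()
```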
BIN (new binary asset added, 1.0 MiB)
@@ -0,0 +1,21 @@
+import requests
+
+with open("./example.jpeg", "rb") as file:
+    image = file.read()
+
+r = requests.post(
+    "http://127.0.0.1:8000/v1/images/user-profiles",
+    params={"format": "jpeg"},
+    headers={
+        "content-length": str(len(image)),
+        "content-type": "application/octet-stream"
+    },
+    data=image,
+)
+
+r.raise_for_status()
+data = r.json()
+
+print(f"My image id: {data['image_id']}")
+print(f"It took {data['processing_time']}s to complete!")
+print(f"And has a checksum of {data['checksum']}!")
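The upload script above prints the new `image_id`; fetching the image back is symmetrical. The exact serving route below is an assumption (it mirrors the upload route and the `size`/`format` query parameters described in the README), not something this diff pins down:

```python
import requests

image_id = "ccbe2207-8629-4938-9da9-3f75706f9b4e"  # hypothetical id from an upload

r = requests.get(
    f"http://127.0.0.1:8000/v1/images/user-profiles/{image_id}",  # assumed route shape
    params={"size": "medium-square", "format": "webp"},  # preset + encoding format
)
r.raise_for_status()

# The response body is the raw re-encoded image, not JSON.
with open("profile.webp", "wb") as f:
    f.write(r.content)
```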
@@ -1,180 +0,0 @@
-# Contents
-
-- [Setting Up Lust](#initalising-a-configuration)
-- [Changing Configuration](#configuration-guide)
-- [Running Lust](#running-lust)
-- [Uploading an Image](#uploading-images)
-- [Requesting Images](#requesting-images)
-- [Removing Images](#removing-images)
-- [Listing Images](#listing-images)
-
-# Initialising a Configuration
-Lust requires a configuration file to always exist and has many mandatory keys. Because of this there is a utility command `init` which can be used to generate default configuration files.
-
-#### Usage:
-`lust init --backend <backend>`
-
-The backend can be set to any of the valid backends:
-- `postgres` -> Covers PostgreSQL.
-- `mysql` -> Covers MySQL and MariaDB.
-- `sqlite` -> Covers Sqlite.
-- `cassandra` -> Covers Cassandra and Scylla (v4 protocol) **WARNING: This is very beta in terms of performant configuration**
-- `redis` -> Covers Redis and KeyDB (faster Redis) **Does not support listing files but very performant**
-
-Once the file is generated you can change the configuration as you wish; however, be careful not to remove keys.
-
-# Configuration Guide
-Lust comes with several configurable controls which may seem confusing to some at first, so here's a helpful list of keys and their respective purpose.
-
-### Server Configuration
-
-- `log_level` -> What level of logging is enabled; this can be any of: `info` - `debug` - `error` - `off`
-- `base_data_path` -> The base path images are served from. **This cannot be `admin` due to being reserved.**
-- `cache_size` -> The maximum amount of images to keep in cache at once, based on an LRU eviction strategy.
-- `database_backend` -> The database specific configuration (see the database configuration section below).
-- `default_serving_format` -> The format served when no `format` query parameter is passed when requesting an image.
-- `default_serving_preset` -> The default sizing preset to serve; this can be any preset or `original`.
-- `formats` -> A set of format-boolean pairs toggling the enabled formats which will be saved and re-encoded e.g.<br/> ```{"gif": false, "jpeg": true, "png": true, "webp": true}```
-- `host` -> The binding host e.g. `127.0.0.1`.
-- `port` -> The binding port e.g. `7070`.
-- `size_presets` -> A set of maps defining separate size presets which will auto-resize images (see the size presets configuration section below).
-- `webp_ratio` -> The ratio of **lossy compression** for webp images from `0.0` to `100.0` inclusive for minimal and maximal quality respectively. This can be set to `null` to put the encoder into **lossless compression** mode.
-- `webp_compression` -> With lossless encoding, the ratio of compression to speed. If using lossy encoding this does nothing - (float: 0.0 - 100.0 inclusive).
-- `webp_method` -> The quality/speed trade-off (0=fast, 6=slower-better).
-- `webp_threading` -> A bool signalling if multi-threaded encoding should be attempted.
-
-### Database Configuration
-Lust supports any of the following backends:
-- `postgres` -> Covers PostgreSQL.
-- `mysql` -> Covers MySQL and MariaDB.
-- `sqlite` -> Covers Sqlite.
-- `cassandra` -> Covers Cassandra and Scylla (v4 protocol) **WARNING: This is very beta in terms of performant configuration**
-- `redis` -> Covers Redis and KeyDB (faster Redis) **Does not support listing files but very performant**
-
-When configuring a backend in the server config the format should look like:
-```json5
-{
-  "config": {
-    // Backend Specific
-  },
-  "type": "<backend>"
-}
-```
-
-Each backend has a specific configuration layout; see below:
-
-### SQL based databases (Sqlite, PostgreSQL, MySQL)
-- `connection_uri` -> The direct connection URI e.g. `postgres://user:pass@localhost/postgres`.
-- `pool_size` -> The maximum connection pool size.
-
-### Redis based databases (Redis, KeyDB)
-- `connection_uri` -> The direct connection URI e.g. `redis://user:pass@localhost/0`.
-- `pool_size` -> The maximum connection pool size.
-
-### Cassandra
-- `clusters` -> An array of strings following the `"ip:port"` format; each cluster should be one ip per machine.
-- `keyspace` -> A detailed specification of the keyspace replication as specified below.
-- `user` -> The username to connect with.
-- `password` -> The password to connect with.
-
-#### Keyspace Spec
-Currently only `SimpleStrategy` and `NetworkTopologyStrategy` are supported.
-
-#### SimpleStrategy Example
-```json5
-{
-  "spec": {
-    "replication_factor": 3
-  },
-  "strategy": "SimpleStrategy"
-}
-```
-
-#### NetworkTopologyStrategy Example
-```json5
-{
-  "spec": [
-    {"node_name": "DC1", "replication": 3},
-    {"node_name": "DC2", "replication": 2}
-  ],
-  "strategy": "NetworkTopologyStrategy"
-}
-```
-
-### Size Preset Configuration
-Each preset name must be unique, hence they are defined like a map.
-Each preset has a `width` and `height` key defining the sizing of the image.
-
-**An `original` preset always exists and contains the original image uploaded**
-
-#### Example
-```json5
-{
-  "large": {
-    "height": 128,
-    "width": 128
-  },
-  "medium": {
-    "height": 64,
-    "width": 64
-  },
-  "small": {
-    "height": 32,
-    "width": 32
-  }
-}
-```
-
-# Running Lust
-
-Once the configuration has been set up you can use the `run` command to start the server: `lust run`
-
-# Uploading Images
-
-Lust requires images to be uploaded as a Base64 encoded image via a JSON POST request to `/admin/create/image`. The body should follow this schema:
-
-| Field      | Description                                                                                                                               | Required?               |
-|------------|-------------------------------------------------------------------------------------------------------------------------------------------|-------------------------|
-| `format`   | The format of the image e.g. `png`, `jpeg`, etc...                                                                                         | Yes                     |
-| `data`     | The base64 encoded image data.                                                                                                             | Yes                     |
-| `category` | The category to add the image to, this will make the image accessible from `/<base>/:category/:id` rather than the default `/<base>/:id`.  | No (Default: 'default') |
-
-# Requesting Images
-Lust will serve images on the base route given via the `base_data_path` field in the config file. Let's say this is `/images`; we can request an uploaded image with this path e.g. `http://127.0.0.1:7070/images/394e7905-f501-4be8-902f-b8b7ea9d157a`. If the image exists in the default category the server will return the image in the format specified by the `default_serving_format` from the preset defined with `default_serving_preset` in the configuration.
-
-Each image request can have the optional query parameters:
-| Field    | Description                                                                                |
-|----------|--------------------------------------------------------------------------------------------|
-| `format` | Request a specific format of the image e.g. `webp`.                                       |
-| `encode` | Encodes the image with standard base64 encoding and returns the image as a JSON response. |
-| `preset` | Requests a specific preset of the image e.g. `original`.                                   |
-
-# Removing Images
-Images can be removed via the `/admin/delete/image/:id` endpoint via a DELETE request with a JSON body.
-The id should be the file's given UUID; no category is required because it is always unique.
-*NOTE: This endpoint will always return 200 OK if an image doesn't exist; this is just a behaviour of querying the database without pre-checking if it exists.*
-
-# Listing Images
-Lust gives you the ability to list and order the results in the database. **WARNING: Cassandra backends can regularly run into TimeoutErrors due to the nature of this request.**
-
-Listing images can be accessed via the `/admin/list` endpoint and expects a POST request with a JSON body; all entries are chunked into 'pages' of `50` items per page.
-
-An example body would look like:
-```json5
-{
-  "page": 1,
-  "filter": {
-    "filter_type": "category",  // This can be any of 'all', 'category', 'creationdate'
-    "with_value": "default"     // Only required when using the 'category' or 'creationdate' filters.
-  },
-  "order": "creationdate"  // Can be either 'creationdate' or 'totalsize'.
-}
-```
-
-*NOTE: The Cassandra backends will ignore the `order` field due to CQL limitations; all values will be in creation date order instead.*
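For contrast with the new binary upload flow shown earlier, the removed v1 flow posts base64 JSON to `/admin/create/image`. A sketch built strictly from the table and defaults in the deleted document above (host and port are the example values it uses):

```python
import base64

import requests

with open("./example.jpeg", "rb") as file:
    payload = {
        "format": "jpeg",                              # required
        "data": base64.b64encode(file.read()).decode(),  # required, base64 encoded
        "category": "default",                         # optional, defaults to 'default'
    }

r = requests.post("http://127.0.0.1:7070/admin/create/image", json=payload)
print(r.json())
```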
rustfmt.toml
@@ -3,4 +3,5 @@ combine_control_expr = false
 imports_layout = "HorizontalVertical"
 match_block_trailing_comma = true
 imports_granularity = "Module"
 group_imports = "StdExternalCrate"
+max_width = 89
@@ -1,379 +0,0 @@
-use anyhow::Result;
-use async_trait::async_trait;
-use bytes::{Bytes, BytesMut};
-use chrono::{DateTime, NaiveDateTime, Utc};
-use hashbrown::HashMap;
-use log::{debug, info, warn};
-use scylla::query::Query;
-use scylla::statement::prepared_statement::PreparedStatement;
-use scylla::transport::session::Session;
-use scylla::{QueryResult, SessionBuilder};
-use serde::{Deserialize, Serialize};
-use serde_variant::to_variant_name;
-use uuid::Uuid;
-
-use crate::configure::PAGE_SIZE;
-use crate::context::{FilterType, IndexResult, OrderBy};
-use crate::image::{ImageFormat, ImagePresetsData};
-use crate::traits::{DatabaseLinker, ImageStore};
-
-/// Represents a connection pool session with a round robin load balancer.
-type CurrentSession = Session;
-
-type PagedRow = (Uuid, String, i64, i32);
-
-#[derive(Clone, Serialize, Deserialize)]
-#[serde(tag = "strategy", content = "spec")]
-enum ReplicationClass {
-    SimpleStrategy(SimpleNode),
-    NetworkTopologyStrategy(Vec<DataCenterNode>),
-}
-
-#[derive(Clone, Serialize, Deserialize)]
-struct SimpleNode {
-    replication_factor: usize,
-}
-
-#[derive(Clone, Serialize, Deserialize)]
-struct DataCenterNode {
-    node_name: String,
-    replication: usize,
-}
-
-/// The configuration for a cassandra database.
-///
-/// Each cluster should be given in the `host:port` format and
-/// should only be the main node (not replication nodes).
-///
-/// The replication_factor is used when the keyspace is first created,
-/// if the keyspace already exists this number may be ignored despite
-/// being changed due to current implementation limitations.
-///
-/// The replication_class is used when the keyspace is first created,
-/// this has the same caveats as the replication_factor.
-#[derive(Clone, Deserialize)]
-pub struct DatabaseConfig {
-    clusters: Vec<String>,
-    keyspace: ReplicationClass,
-    user: String,
-    password: String,
-}
-
-macro_rules! log_and_convert_error {
-    ( $e:expr ) => {{
-        match $e {
-            Ok(frame) => Some(frame),
-            Err(e) => {
-                warn!("failed to execute query {:?}", e);
-                None
-            },
-        }
-    }};
-}
-
-async fn get_page(
-    filter: &FilterType,
-    session: &CurrentSession,
-    stmt: &PreparedStatement,
-    page_state: Option<Bytes>,
-) -> Result<QueryResult> {
-    Ok(match &filter {
-        FilterType::All => session.execute_paged(stmt, &[], page_state).await?,
-        FilterType::CreationDate(v) => {
-            session
-                .execute_paged(stmt, (v.to_string(),), page_state)
-                .await?
-        },
-        FilterType::Category(v) => session.execute_paged(stmt, (v,), page_state).await?,
-    })
-}
-
-/// A cassandra database backend.
-pub struct Backend {
-    session: CurrentSession,
-    check_cat: Option<PreparedStatement>,
-    get_file: HashMap<String, HashMap<String, PreparedStatement>>,
-}
-
-impl Backend {
-    pub async fn connect(cfg: DatabaseConfig) -> Result<Self> {
-        info!("connecting to database");
-        let session = SessionBuilder::new()
-            .user(cfg.user, cfg.password)
-            .known_nodes(cfg.clusters.as_ref())
-            .build()
-            .await?;
-        info!("connect successful");
-
-        let replication = match cfg.keyspace {
-            ReplicationClass::SimpleStrategy(node) => {
-                format!(
-                    "'class': 'SimpleStrategy', 'replication_factor': {}",
-                    node.replication_factor,
-                )
-            },
-            ReplicationClass::NetworkTopologyStrategy(mut nodes) => {
-                let mut spec = nodes
-                    .drain(..)
-                    .map(|v| format!("'{}': {}", v.node_name, v.replication))
-                    .collect::<Vec<String>>();
-
-                spec.insert(0, "'class' : 'NetworkTopologyStrategy'".to_string());
-
-                spec.join(", ")
-            },
-        };
-
-        let create_ks = format!(
-            "CREATE KEYSPACE IF NOT EXISTS lust_ks WITH REPLICATION = {{{}}};",
-            replication
-        );
-        debug!("creating keyspace {}", &create_ks);
-
-        let _ = session.query(create_ks, &[]).await?;
-        info!("keyspace ensured");
-
-        Ok(Self {
-            session,
-            check_cat: None,
-            get_file: HashMap::new(),
-        })
-    }
-}
-
-#[async_trait]
-impl DatabaseLinker for Backend {
-    async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec<ImageFormat>) -> Result<()> {
-        info!("building tables");
-
-        let query = r#"
-        CREATE TABLE IF NOT EXISTS lust_ks.image_metadata (
-            file_id UUID,
-            category TEXT,
-            insert_date TIMESTAMP,
-            total_size BIGINT,
-            PRIMARY KEY ((file_id), category)
-        ) WITH CLUSTERING ORDER BY (category DESC);
-        "#;
-
-        self.session.query(query, &[]).await?;
-        info!("metadata table created successfully");
-
-        let query = r#"
-        CREATE INDEX IF NOT EXISTS ON lust_ks.image_metadata (category);
-        "#;
-
-        self.session.query(query, &[]).await?;
-        info!("metadata table index created successfully");
-
-        let mut columns = vec![format!("file_id UUID PRIMARY KEY")];
-
-        for format in formats.iter() {
-            let column = to_variant_name(format).expect("unreachable");
-            columns.push(format!("{} BLOB", column))
-        }
-
-        for preset in presets {
-            let query = format!(
-                "CREATE TABLE IF NOT EXISTS lust_ks.{table} ({columns})",
-                table = preset,
-                columns = columns.join(", ")
-            );
-
-            self.session.query(query, &[]).await?;
-            debug!("created preset table {}", preset);
-
-            for format in formats.iter() {
-                let column = to_variant_name(format).expect("unreachable");
-
-                let qry = format!(
-                    "SELECT {column} FROM lust_ks.{table} WHERE file_id = ? LIMIT 1;",
-                    column = column,
-                    table = preset,
-                );
-
-                let prepared = self.session.prepare(qry).await?;
-                debug!("prepared check query {:?}", format);
-
-                if let Some(tbl) = self.get_file.get_mut(preset) {
-                    tbl.insert(column.to_string(), prepared);
-                } else {
-                    let mut new_map = HashMap::new();
-                    new_map.insert(column.to_string(), prepared);
-                    self.get_file.insert(preset.to_string(), new_map);
-                }
-            }
-        }
-        info!("tables created");
-
-        let qry = r#"
-        SELECT file_id FROM lust_ks.image_metadata
-        WHERE file_id = ? AND category = ?;
-        "#;
-        let prepared = self.session.prepare(qry).await?;
-        self.check_cat = Some(prepared);
-
-        info!("prepared all queries and tables");
-
-        Ok(())
-    }
-}
-
-#[async_trait]
-impl ImageStore for Backend {
-    async fn get_image(
-        &self,
-        file_id: Uuid,
-        preset: String,
-        category: &str,
-        format: ImageFormat,
-    ) -> Option<BytesMut> {
-        let prepared = self.check_cat.as_ref().unwrap();
-        let query_result =
-            log_and_convert_error!(self.session.execute(prepared, (file_id, category)).await)?;
-
-        let _ = query_result.rows?;
-
-        let column = to_variant_name(&format).expect("unreachable");
-        let prepared = self.get_file.get(&preset)?.get(column)?;
-
-        let query_result =
-            log_and_convert_error!(self.session.execute(prepared, (file_id,)).await)?;
-
-        let mut rows = query_result.rows?;
-        let row = rows.pop()?;
-        let (data,) = log_and_convert_error!(row.into_typed::<(Vec<u8>,)>())?;
-        let ref_: &[u8] = data.as_ref();
-        Some(BytesMut::from(ref_))
-    }
-
-    async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
-        let mut total: i64 = 0;
-        for (preset, preset_data) in data {
-            let sum: i64 = preset_data.values().map(|v| v.len() as i64).sum();
-            total += sum;
-
-            let columns: String = preset_data
-                .keys()
-                .map(|v| to_variant_name(v).expect("unreachable"))
-                .collect::<Vec<&str>>()
-                .join(", ");
-
-            let placeholders: String = (0..preset_data.len())
-                .map(|_| "?")
-                .collect::<Vec<&str>>()
-                .join(", ");
-
-            let mut values: Vec<Vec<u8>> = preset_data.values().map(|v| v.to_vec()).collect();
-
-            values.insert(0, file_id.as_bytes().to_vec());
-
-            let qry = format!(
-                "INSERT INTO lust_ks.{table} (file_id, {columns}) VALUES (?, {placeholders});",
-                table = preset,
-                columns = columns,
-                placeholders = placeholders,
-            );
-
-            let prepared = self.session.prepare(qry).await?;
-            self.session.execute(&prepared, values).await?;
-        }
-
-        let qry = r#"
-        INSERT INTO lust_ks.image_metadata (
-            file_id,
-            category,
-            insert_date,
-            total_size
-        ) VALUES (?, ?, ?, ?);"#;
-
-        let now = Utc::now();
-
-        self.session
-            .query(qry, (file_id, category, now.timestamp(), total))
-            .await?;
-        Ok(())
-    }
-
-    async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> {
-        for preset in presets {
-            let qry = format!(
-                "DELETE FROM lust_ks.{table} WHERE file_id = ?;",
-                table = preset,
-            );
-
-            self.session
-                .query(qry, (file_id.as_bytes().to_vec(),))
-                .await?;
-        }
-
-        let qry = "DELETE FROM lust_ks.image_metadata WHERE file_id = ?;";
-
-        self.session.query(qry, (file_id,)).await?;
-        Ok(())
-    }
-
-    async fn list_entities(
-        &self,
-        filter: FilterType,
-        _order: OrderBy,
-        page: usize,
-    ) -> Result<Vec<IndexResult>> {
-        let qry = format!(
-            r#"
-            SELECT file_id, category, insert_date, total_size
-            FROM lust_ks.image_metadata
-            "#,
-        );
-
-        let mut query = match &filter {
-            FilterType::All => {
-                let qry = format!("{};", qry);
-                Query::new(qry)
-            },
-            FilterType::CreationDate(_) => {
-                let qry = format!("{} WHERE insert_date = ?;", qry);
-                Query::new(qry)
-            },
-            FilterType::Category(_) => {
-                let qry = format!("{} WHERE category = ?;", qry);
-                Query::new(qry)
-            },
-        };
-
-        query.set_page_size(PAGE_SIZE as i32);
-        let prepared = self.session.prepare(query).await?;
-        let mut page_state = None;
-
-        for _ in 0..page - 1 {
-            let rows = get_page(&filter, &self.session, &prepared, page_state.clone()).await?;
-
-            page_state = rows.paging_state;
-        }
-
-        let target_rows = get_page(&filter, &self.session, &prepared, page_state.clone()).await?;
-
-        let results = if let Some(mut rows) = target_rows.rows {
-            rows.drain(..)
-                .map(|r| {
-                    let r = r
-                        .into_typed::<PagedRow>()
-                        .expect("database format invalidated");
-
-                    let res = IndexResult {
-                        file_id: r.0,
-                        category: r.1,
-                        created_on: DateTime::from_utc(NaiveDateTime::from_timestamp(r.2, 0), Utc),
-                        total_size: r.3,
-                    };
-
-                    res
-                })
-                .collect()
-        } else {
-            vec![]
-        };
-
-        Ok(results)
-    }
-}
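For reference, the replication string assembled by the deleted `connect` method above maps the `keyspace` config (see the `SimpleStrategy`/`NetworkTopologyStrategy` examples in the removed docs) onto a CQL `CREATE KEYSPACE` clause. A Python re-trace of that mapping, covering both variants:

```python
def replication_spec(keyspace: dict) -> str:
    """Re-traces the Rust `match cfg.keyspace` block in `connect` above."""
    strategy, spec = keyspace["strategy"], keyspace["spec"]
    if strategy == "SimpleStrategy":
        return (
            "'class': 'SimpleStrategy', "
            f"'replication_factor': {spec['replication_factor']}"
        )
    # NetworkTopologyStrategy: one '<data centre>': <replicas> entry per node.
    parts = ["'class' : 'NetworkTopologyStrategy'"]
    parts += [f"'{node['node_name']}': {node['replication']}" for node in spec]
    return ", ".join(parts)

cfg = {
    "strategy": "NetworkTopologyStrategy",
    "spec": [
        {"node_name": "DC1", "replication": 3},
        {"node_name": "DC2", "replication": 2},
    ],
}
print(
    "CREATE KEYSPACE IF NOT EXISTS lust_ks "
    f"WITH REPLICATION = {{{replication_spec(cfg)}}};"
)
```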
@@ -1,3 +0,0 @@
-pub mod cql;
-pub mod redis;
-pub mod sql;
@@ -1,148 +0,0 @@
-use std::sync::atomic::{AtomicUsize, Ordering};
-
-use anyhow::Result;
-use async_trait::async_trait;
-use bytes::BytesMut;
-use log::error;
-use redis::aio::ConnectionManager;
-use redis::{AsyncCommands, AsyncIter};
-use serde::{Deserialize, Serialize};
-use uuid::Uuid;
-
-use crate::context::{FilterType, IndexResult, OrderBy};
-use crate::image::{ImageFormat, ImagePresetsData};
-use crate::traits::{DatabaseLinker, ImageStore};
-
-#[derive(Clone, Serialize, Deserialize)]
-pub struct RedisConfig {
-    connection_uri: String,
-    pool_size: usize,
-}
-
-struct RedisPool {
-    connections: Vec<ConnectionManager>,
-    index: AtomicUsize,
-}
-
-impl RedisPool {
-    pub async fn connect(cfg: RedisConfig) -> Result<Self> {
-        let client = redis::Client::open(cfg.connection_uri)?;
-        let mut conns = Vec::new();
-        for _ in 0..cfg.pool_size {
-            let conn = client.get_tokio_connection_manager().await?;
-            conns.push(conn);
-        }
-
-        Ok(Self {
-            connections: conns,
-            index: AtomicUsize::new(0),
-        })
-    }
-
-    pub fn get(&self) -> ConnectionManager {
-        let index = self.index.load(Ordering::Relaxed);
-        let conn = self.connections[index].clone();
-
-        if index == (self.connections.len() - 1) {
-            self.index.store(0, Ordering::Relaxed);
-        } else {
-            self.index.store(index + 1, Ordering::Relaxed);
-        }
-
-        conn
-    }
-}
-
-pub struct Backend {
-    pool: RedisPool,
-}
-
-impl Backend {
-    pub async fn connect(cfg: RedisConfig) -> Result<Self> {
-        let pool = RedisPool::connect(cfg).await?;
-
-        Ok(Self { pool })
-    }
-}
-
-#[async_trait]
-impl DatabaseLinker for Backend {
-    /// Due to the nature of the key-value setup for redis clients this has completely
-    /// different handling so does not do anything when this function is called.
-    async fn ensure_tables(
-        &mut self,
-        _presets: Vec<&str>,
-        _columns: Vec<ImageFormat>,
-    ) -> Result<()> {
-        Ok(())
-    }
-}
-
-#[async_trait]
-impl ImageStore for Backend {
-    async fn get_image(
-        &self,
-        file_id: Uuid,
-        preset: String,
-        category: &str,
-        format: ImageFormat,
-    ) -> Option<BytesMut> {
-        let key = format!("{:?} {} {} {:?}", file_id, preset, category, format);
-        let mut conn = self.pool.get();
-        let result = conn.get(&key).await;
-
-        let val: Vec<u8> = match result {
-            Ok(v) => v,
-            Err(e) => {
-                error!("failed to fetch key {} from redis: {:?}", &key, e);
-                return None;
-            },
-        };
-
-        if val.len() == 0 {
-            None
-        } else {
-            let ref_: &[u8] = val.as_ref();
-            Some(BytesMut::from(ref_))
-        }
-    }
-
-    async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
-        let mut pairs = Vec::new();
-
-        for (preset, formats) in data {
-            for (format, buff) in formats {
-                let key = format!("{:?} {} {} {:?}", &file_id, &preset, category, format);
-                pairs.push((key, buff.to_vec()));
-            }
-        }
-
-        let mut conn = self.pool.get();
-        conn.set_multiple(&pairs).await?;
-
-        Ok(())
-    }
-
-    async fn remove_image(&self, file_id: Uuid, _presets: Vec<&String>) -> Result<()> {
-        let mut conn = self.pool.get();
-        let mut conn2 = self.pool.get();
-        let mut keys: AsyncIter<String> = conn.scan_match(format!("{:?}*", file_id)).await?;
-        while let Some(v) = keys.next_item().await {
-            conn2.del(v).await?;
-        }
-
-        Ok(())
-    }
-
-    /// This is non-functional due to limitations with the key-value setup of redis.
-    async fn list_entities(
-        &self,
-        _filter: FilterType,
-        _order: OrderBy,
-        _page: usize,
-    ) -> Result<Vec<IndexResult>> {
-        Err(anyhow::Error::msg(
-            "redis backend does not support listing entities",
-        ))
-    }
-}
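One detail worth noting in the deleted `RedisPool` above: `get` hands out connections round-robin by loading and then storing the index with two separate relaxed atomic operations, so two concurrent callers can receive the same connection; each `ConnectionManager` clone is safe to share, so this only skews the balancing. A compact Python re-trace of the wrap-around logic (`itertools.cycle` would be the idiomatic shortcut):

```python
class RoundRobin:
    def __init__(self, items):
        self.items = list(items)
        self.index = 0

    def get(self):
        item = self.items[self.index]
        # Wrap back to slot 0 after the last slot, as the Rust version does.
        self.index = 0 if self.index == len(self.items) - 1 else self.index + 1
        return item

pool = RoundRobin(["conn-0", "conn-1", "conn-2"])
print([pool.get() for _ in range(5)])  # conn-0, conn-1, conn-2, conn-0, conn-1
```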
@ -1,676 +0,0 @@
|
|||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use bytes::BytesMut;
|
||||
use chrono::Utc;
|
||||
use log::{debug, error, info};
|
||||
use serde::Deserialize;
|
||||
use serde_variant::to_variant_name;
|
||||
use sqlx::mysql::{MySqlPool, MySqlPoolOptions};
|
||||
use sqlx::postgres::{PgPool, PgPoolOptions};
|
||||
use sqlx::sqlite::{SqlitePool, SqlitePoolOptions};
|
||||
use sqlx::Row;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::configure::PAGE_SIZE;
|
||||
use crate::context::{FilterType, IndexResult, OrderBy};
|
||||
use crate::image::{ImageFormat, ImagePresetsData};
|
||||
use crate::traits::{DatabaseLinker, ImageStore};
|
||||
|
||||
/// The configuration for the SQL based database backends.
|
||||
///
|
||||
/// The `connection_uri` should be formatted as a direct connect
|
||||
/// uri. e.g.
|
||||
/// `postgresql://john:boo@localhost/postgres`
|
||||
///
|
||||
/// The `pool_size` determined the *maximum* amount of pool connections.
|
||||
#[derive(Clone, Deserialize)]
|
||||
pub struct DatabaseConfig {
|
||||
connection_uri: String,
|
||||
pool_size: u32,
|
||||
}
|
||||
|
||||
fn build_select_qry(column: &str, preset: &str, placeholder: &str) -> String {
|
||||
format!(
|
||||
"SELECT {column} FROM {table} WHERE file_id = {placeholder} LIMIT 1;",
|
||||
column = column,
|
||||
table = preset,
|
||||
placeholder = placeholder,
|
||||
)
|
||||
}
|
||||
|
||||
fn build_insert_qry(preset: &str, columns: &Vec<&str>, placeholders: &Vec<String>) -> String {
|
||||
let columns = columns.join(", ");
|
||||
let placeholders = placeholders.join(", ");
|
||||
format!(
|
||||
"INSERT INTO {table} ({columns}) VALUES ({placeholders});",
|
||||
table = preset,
|
||||
columns = columns,
|
||||
placeholders = placeholders,
|
||||
)
|
||||
}
|
||||
|
||||
fn build_delete_queries(presets: &Vec<&String>, placeholder: &str) -> Vec<String> {
|
||||
let mut queries = vec![];
|
||||
for preset in presets {
|
||||
queries.push(format!(
|
||||
"DELETE FROM {table} WHERE file_id = {placeholder};",
|
||||
table = preset,
|
||||
placeholder = placeholder,
|
||||
))
|
||||
}
|
||||
|
||||
queries
|
||||
}
|
||||
|
||||
/// Either extracts the value as a `&[u8]` from the row as `Some(BytesMut)`
|
||||
/// or becomes `None`.
|
||||
macro_rules! extract_or_none {
|
||||
( $e:expr, $c:expr ) => {{
|
||||
match $e {
|
||||
Ok(row) => {
|
||||
let row = row?;
|
||||
let data: &[u8] = row.get($c);
|
||||
Some(BytesMut::from(data))
|
||||
},
|
||||
Err(e) => {
|
||||
error!("failed to fetch row due to error: {:?}", e);
|
||||
None
|
||||
},
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
/// Builds a SQL query for the given preset (table) from
|
||||
/// the given data adding place holders for each value for
|
||||
/// prepared statements.
|
||||
macro_rules! build_insert {
|
||||
( $preset:expr, $data:expr, $placeholder:expr ) => {{
|
||||
let mut columns: Vec<&str> = $data
|
||||
.keys()
|
||||
.map(|v| to_variant_name(v).expect("unreachable"))
|
||||
.collect();
|
||||
columns.insert(0, "file_id");
|
||||
|
||||
let values: Vec<BytesMut> = $data.values().map(|v| v.clone()).collect();
|
||||
|
||||
let placeholders: Vec<String> = (1..columns.len() + 1).map($placeholder).collect();
|
||||
|
||||
(build_insert_qry($preset, &columns, &placeholders), values)
|
||||
}};
|
||||
}
|
||||
|
||||
/// Builds a sqlx query based on the given query string and values
|
||||
///
|
||||
/// This also accounts for the file_id being a uuid vs everything else
|
||||
/// being bytes.
|
||||
macro_rules! query_with_parameters {
|
||||
( $id:expr, $qry:expr, $values:expr ) => {{
|
||||
let mut qry = sqlx::query($qry).bind($id);
|
||||
|
||||
for value in $values {
|
||||
qry = qry.bind(value)
|
||||
}
|
||||
|
||||
qry
|
||||
}};
|
||||
}
|
||||
|
||||
/// Deletes a file with a given id from all presets.
|
||||
///
|
||||
/// Due to the nature of the Pool types but the similarity between
|
||||
/// each database code to delete files it makes more sense to put this
|
||||
/// in a macro over a function.
|
||||
macro_rules! delete_file {
|
||||
( $id:expr, $presets:expr, $placeholder:expr, $pool:expr ) => {{
|
||||
let file_id = $id.to_string();
|
||||
let queries = build_delete_queries($presets, $placeholder);
|
||||
|
||||
for qry in queries {
|
||||
let query = sqlx::query(&qry).bind(&file_id);
|
||||
query.execute($pool).await?;
|
||||
}
|
||||
|
||||
let qry = format!(
|
||||
"DELETE FROM image_metadata WHERE file_id = {}",
|
||||
$placeholder,
|
||||
);
|
||||
|
||||
let query = sqlx::query(&qry).bind($id.to_string());
|
||||
query.execute($pool).await?;
|
||||
}};
|
||||
}
|
||||
|
||||
/// Inserts a given file_id into the index table.
|
||||
///
|
||||
/// This table mostly acts as the metadata table for listing files of
|
||||
/// given categories.
|
||||
macro_rules! insert_metadata {
|
||||
( $file_id:expr, $category:expr, $total:expr, $placeholder:expr, $pool:expr, ) => {{
|
||||
let placeholders: String = (1..5).map($placeholder).collect::<Vec<String>>().join(", ");
|
||||
|
||||
let qry = format!(
|
||||
r#"
|
||||
INSERT INTO image_metadata (
|
||||
file_id,
|
||||
category,
|
||||
insert_date,
|
||||
total_size
|
||||
) VALUES ({placeholders})"#,
|
||||
placeholders = placeholders,
|
||||
);
|
||||
|
||||
let now = Utc::now();
|
||||
|
||||
let query = sqlx::query(&qry)
|
||||
.bind($file_id)
|
||||
.bind($category)
|
||||
.bind(now)
|
||||
.bind($total);
|
||||
query.execute($pool).await?;
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! sum_total {
|
||||
( $total:expr, $values:expr ) => {{
|
||||
let sum: i64 = $values.values().map(|v| v.len() as i64).sum();
|
||||
$total += sum;
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! check_category {
|
||||
( $file_id:expr, $category:expr, $ph1:expr, $ph2:expr, $pool:expr ) => {{
|
||||
let qry = format!(
|
||||
"SELECT 1 FROM image_metadata WHERE file_id = {} AND category = {};",
|
||||
$ph1, $ph2,
|
||||
);
|
||||
|
||||
sqlx::query(&qry)
|
||||
.bind($file_id.to_string())
|
||||
.bind($category)
|
||||
.fetch_optional($pool)
|
||||
.await
|
||||
.unwrap_or(None)
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! apply_filter {
|
||||
( $qry:expr, $placeholder:expr, $filter:expr ) => {{
|
||||
match $filter {
|
||||
FilterType::All => (),
|
||||
FilterType::Category(_) => $qry = format!("{} WHERE category = {}", $qry, $placeholder),
|
||||
FilterType::CreationDate(_) => {
|
||||
$qry = format!("{} WHERE insert_date = {}", $qry, $placeholder)
|
||||
},
|
||||
};
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! bind_filter {
|
||||
( $query:expr, $filter:expr ) => {{
|
||||
match $filter {
|
||||
FilterType::All => (),
|
||||
FilterType::Category(v) => $query = $query.bind(v),
|
||||
FilterType::CreationDate(v) => $query = $query.bind(v),
|
||||
};
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! from_rows {
|
||||
( $rows:expr ) => {{
|
||||
$rows
|
||||
.drain(..)
|
||||
.map(|v| IndexResult {
|
||||
file_id: Uuid::from_str(v.get("file_id")).expect("uuid was invalid in database"),
|
||||
category: v.get("category"),
|
||||
total_size: v.get("total_size"),
|
||||
created_on: v.get("insert_date"),
|
||||
})
|
||||
.collect()
|
||||
}};
|
||||
}

/// A database backend set to handle the PostgreSQL database.
pub struct PostgresBackend {
    pool: PgPool,
}

impl PostgresBackend {
    /// Connect to the given PostgreSQL server.
    ///
    /// This will build a connection pool and connect with a maximum
    /// of n connections determined by the `pool_size` of the given
    /// config.
    pub async fn connect(cfg: DatabaseConfig) -> Result<Self> {
        let pool = PgPoolOptions::new()
            .max_connections(cfg.pool_size)
            .connect(&cfg.connection_uri)
            .await?;

        Ok(Self { pool })
    }
}

#[async_trait]
impl DatabaseLinker for PostgresBackend {
    async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec<ImageFormat>) -> Result<()> {
        info!("building tables");

        let query = sqlx::query(
            r#"
            CREATE TABLE IF NOT EXISTS image_metadata (
                file_id CHAR(36) PRIMARY KEY,
                category TEXT,
                insert_date TIMESTAMP WITH TIME ZONE,
                total_size INTEGER
            )"#,
        );

        query.execute(&self.pool).await?;

        let mut columns = vec![format!("file_id CHAR(36) PRIMARY KEY")];

        for format in formats {
            let column = to_variant_name(&format).expect("unreachable");
            columns.push(format!("{} BYTEA", column))
        }

        for preset in presets {
            let qry = format!(
                "CREATE TABLE IF NOT EXISTS {table} ({columns})",
                table = preset,
                columns = columns.join(", ")
            );

            let query = sqlx::query(&qry);

            query.execute(&self.pool).await?;
        }

        Ok(())
    }
}
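
// Illustrative sketch (the preset name "medium" is hypothetical): for a
// preset with only png and webp enabled, the loop above would issue roughly
// the following DDL, i.e. one table per preset with one BYTEA column per
// enabled encoding format:
//
//     CREATE TABLE IF NOT EXISTS medium (
//         file_id CHAR(36) PRIMARY KEY, png BYTEA, webp BYTEA
//     )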

#[async_trait]
impl ImageStore for PostgresBackend {
    async fn get_image(
        &self,
        file_id: Uuid,
        preset: String,
        category: &str,
        format: ImageFormat,
    ) -> Option<BytesMut> {
        check_category!(file_id, category, "$1", "$2", &self.pool)?;

        let column = to_variant_name(&format).expect("unreachable");

        let qry = build_select_qry(column, &preset, "$1");
        let qry = sqlx::query(&qry).bind(file_id.to_string());

        extract_or_none!(qry.fetch_optional(&self.pool).await, column)
    }

    async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
        let mut total: i64 = 0;
        for (preset, preset_data) in data {
            sum_total!(total, preset_data);
            let (qry, values) = build_insert!(&preset, preset_data, |i| format!("${}", i));

            let values_ = values.iter().map(|v| v.as_ref());
            let query = query_with_parameters!(file_id.to_string(), &qry, values_);
            query.execute(&self.pool).await?;
        }

        insert_metadata!(
            file_id.to_string(),
            category,
            total,
            |i| format!("${}", i),
            &self.pool,
        );

        Ok(())
    }

    async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> {
        delete_file!(file_id, &presets, "$1", &self.pool);

        Ok(())
    }

    async fn list_entities(
        &self,
        filter: FilterType,
        order: OrderBy,
        page: usize,
    ) -> Result<Vec<IndexResult>> {
        // we start at 1 but the offset should be calculated from 0
        let skip = PAGE_SIZE * (page as i64 - 1);
        let order = order.as_str();

        let mut qry = format!(
            r#"
            SELECT file_id, category, insert_date, total_size
            FROM image_metadata
            ORDER BY {} DESC
            OFFSET $1
            LIMIT $2
            "#,
            order
        );

        apply_filter!(qry, "$3", &filter);

        let mut query = sqlx::query(&qry).bind(skip).bind(PAGE_SIZE);

        bind_filter!(query, filter);

        let mut rows = query.fetch_all(&self.pool).await?;
        let results = from_rows!(rows);

        Ok(results)
    }
}
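
// Illustrative sketch: with PAGE_SIZE = 50 the offset arithmetic in
// `list_entities` maps 1-based pages onto 0-based row offsets:
//
//     let page: i64 = 2;
//     let skip = 50 * (page - 1);
//     assert_eq!(skip, 50); // page 1 -> OFFSET 0, page 2 -> OFFSET 50, ...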

/// A database backend set to handle the MySQL / MariaDB database.
pub struct MySQLBackend {
    pool: MySqlPool,
}

impl MySQLBackend {
    /// Connect to the given MySQL / MariaDB server.
    ///
    /// This will build a connection pool and connect with a maximum
    /// of n connections determined by the `pool_size` of the given
    /// config.
    pub async fn connect(cfg: DatabaseConfig) -> Result<Self> {
        let pool = MySqlPoolOptions::new()
            .max_connections(cfg.pool_size)
            .connect(&cfg.connection_uri)
            .await?;

        Ok(Self { pool })
    }
}

#[async_trait]
impl DatabaseLinker for MySQLBackend {
    async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec<ImageFormat>) -> Result<()> {
        info!("building tables");

        let query = sqlx::query(
            r#"
            CREATE TABLE IF NOT EXISTS image_metadata (
                file_id CHAR(36) PRIMARY KEY,
                category TEXT,
                insert_date TIMESTAMP,
                total_size INTEGER
            )"#,
        );

        query.execute(&self.pool).await?;

        let mut columns = vec![format!("file_id CHAR(36) PRIMARY KEY")];

        for format in formats {
            let column = to_variant_name(&format).expect("unreachable");
            columns.push(format!("{} LONGBLOB", column))
        }

        for preset in presets {
            let qry = format!(
                "CREATE TABLE IF NOT EXISTS {table} ({columns})",
                table = preset,
                columns = columns.join(", ")
            );

            let query = sqlx::query(&qry);

            query.execute(&self.pool).await?;
        }

        Ok(())
    }
}

#[async_trait]
impl ImageStore for MySQLBackend {
    async fn get_image(
        &self,
        file_id: Uuid,
        preset: String,
        category: &str,
        format: ImageFormat,
    ) -> Option<BytesMut> {
        check_category!(file_id, category, "?", "?", &self.pool)?;

        let column = to_variant_name(&format).expect("unreachable");

        let qry = build_select_qry(column, &preset, "?");
        let query = sqlx::query(&qry).bind(file_id.to_string());

        extract_or_none!(query.fetch_optional(&self.pool).await, column)
    }

    async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
        let mut total: i64 = 0;
        for (preset, preset_data) in data {
            sum_total!(total, preset_data);
            let (qry, values) = build_insert!(&preset, preset_data, |_| "?".to_string());

            let values_ = values.iter().map(|v| v.as_ref());
            let query = query_with_parameters!(file_id.to_string(), &qry, values_);
            query.execute(&self.pool).await?;
        }

        insert_metadata!(
            file_id.to_string(),
            category,
            total,
            |_| "?".to_string(),
            &self.pool,
        );

        Ok(())
    }

    async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> {
        delete_file!(file_id, &presets, "?", &self.pool);
        Ok(())
    }

    async fn list_entities(
        &self,
        filter: FilterType,
        order: OrderBy,
        page: usize,
    ) -> Result<Vec<IndexResult>> {
        // we start at 1 but the offset should be calculated from 0
        let skip = PAGE_SIZE * (page as i64 - 1);
        let order = order.as_str();

        let mut qry = format!(
            r#"
            SELECT file_id, category, insert_date, total_size
            FROM image_metadata
            ORDER BY {} DESC
            LIMIT ?, ?
            "#,
            order
        );

        apply_filter!(qry, "?", &filter);

        let mut query = sqlx::query(&qry).bind(skip).bind(PAGE_SIZE);

        bind_filter!(query, filter);

        let mut rows = query.fetch_all(&self.pool).await?;
        let results = from_rows!(rows);

        Ok(results)
    }
}

/// A database backend set to handle the Sqlite database.
///
/// Due to SQLite being a single file, this is *not* recommended for use
/// in production. Consider using something like PostgreSQL or Cassandra
/// in production instead.
///
/// This backend requires that the system uses a standard file approach,
/// e.g. not in-memory / shared memory, due to the sqlx::Pool handling.
/// If in-memory is used this can produce undefined behaviour in terms
/// of what data is perceived to be stored.
pub struct SqliteBackend {
    pool: SqlitePool,
}

impl SqliteBackend {
    /// Connect to the given Sqlite file.
    ///
    /// This will build a connection pool and connect with a maximum
    /// of n connections determined by the `pool_size` of the given
    /// config.
    ///
    /// Due to the nature of this being a pool setup, in-memory setups are
    /// not supported.
    pub async fn connect(cfg: DatabaseConfig) -> Result<Self> {
        let pool = SqlitePoolOptions::new()
            .max_connections(cfg.pool_size)
            .connect(&cfg.connection_uri)
            .await?;

        info!("successfully connected to sqlite");

        Ok(Self { pool })
    }
}

#[async_trait]
impl DatabaseLinker for SqliteBackend {
    async fn ensure_tables(&mut self, presets: Vec<&str>, formats: Vec<ImageFormat>) -> Result<()> {
        info!("building tables");

        let query = sqlx::query(
            "
            CREATE TABLE IF NOT EXISTS image_metadata (
                file_id CHAR(36) PRIMARY KEY,
                category TEXT,
                insert_date TEXT,
                total_size INTEGER
            )",
        );

        query.execute(&self.pool).await?;
        info!("metadata table created successfully");

        let mut columns = vec![format!("file_id CHAR(36) PRIMARY KEY")];

        for format in formats {
            let column = to_variant_name(&format).expect("unreachable");
            columns.push(format!("{} BLOB", column))
        }

        for preset in presets {
            let qry = format!(
                "CREATE TABLE IF NOT EXISTS {table} ({columns})",
                table = preset,
                columns = columns.join(", ")
            );

            let query = sqlx::query(&qry);

            query.execute(&self.pool).await?;

            debug!("created preset table {}", preset);
        }
        info!("all preset tables created successfully");

        Ok(())
    }
}

#[async_trait]
impl ImageStore for SqliteBackend {
    async fn get_image(
        &self,
        file_id: Uuid,
        preset: String,
        category: &str,
        format: ImageFormat,
    ) -> Option<BytesMut> {
        check_category!(file_id, category, "?", "?", &self.pool)?;

        let column = to_variant_name(&format).expect("unreachable");

        let qry = build_select_qry(column, &preset, "?");
        let query = sqlx::query(&qry).bind(file_id.to_string());

        extract_or_none!(query.fetch_optional(&self.pool).await, column)
    }

    async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
        let mut total: i64 = 0;
        for (preset, preset_data) in data {
            sum_total!(total, preset_data);

            let (qry, values) = build_insert!(&preset, preset_data, |_| "?".to_string());

            let values_ = values.iter().map(|v| v.as_ref());
            let query = query_with_parameters!(file_id.to_string(), &qry, values_);
            query.execute(&self.pool).await?;
        }

        insert_metadata!(
            file_id.to_string(),
            category,
            total,
            |_| "?".to_string(),
            &self.pool,
        );

        Ok(())
    }

    async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> {
        delete_file!(file_id, &presets, "?", &self.pool);
        Ok(())
    }

    async fn list_entities(
        &self,
        filter: FilterType,
        order: OrderBy,
        page: usize,
    ) -> Result<Vec<IndexResult>> {
        // we start at 1 but the offset should be calculated from 0
        let skip = PAGE_SIZE * (page as i64 - 1);
        let order = match order {
            OrderBy::CreationDate => "datetime(insert_date)",
            OrderBy::TotalSize => "total_size",
        };

        let mut qry = format!(
            r#"
            SELECT file_id, category, insert_date, total_size
            FROM image_metadata
            ORDER BY {} DESC
            LIMIT ?, ?;
            "#,
            order
        );

        apply_filter!(qry, "?", &filter);

        let mut query = sqlx::query(&qry).bind(skip).bind(PAGE_SIZE);

        bind_filter!(query, filter);

        let mut rows = query.fetch_all(&self.pool).await?;
        let results = from_rows!(rows);

        Ok(results)
    }
}
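
// Illustrative sketch: unlike the Postgres backend's `OFFSET $1 LIMIT $2`,
// the SQLite and MySQL backends use the two-argument form
// `LIMIT <offset>, <row_count>`, which is why `skip` is bound before
// PAGE_SIZE:
//
//     sqlx::query("... LIMIT ?, ?").bind(skip).bind(PAGE_SIZE);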

src/cache.rs
@@ -1,58 +1,59 @@
use std::sync::Arc;

use bytes::BytesMut;
use concread::arcache::{ARCache, ARCacheBuilder};
use std::ops::Deref;
use anyhow::anyhow;
use bytes::Bytes;
use once_cell::sync::OnceCell;
use uuid::Uuid;
use crate::config::CacheConfig;

use crate::image::ImageFormat;
static GLOBAL_CACHE: OnceCell<Cache> = OnceCell::new();

/// The key that acts as the hashed key.
pub type CacheKey = (Uuid, String, ImageFormat);

/// Cheaply cloneable lock around an LRU cache.
pub type CacheStore = Arc<ARCache<CacheKey, BytesMut>>;

pub static CACHE_STATE: OnceCell<CacheState> = OnceCell::new();

/// A wrapper around the `CacheStore` type letting it be put into Gotham's
/// shared state.
#[derive(Clone)]
pub struct CacheState(pub Option<CacheStore>);

impl CacheState {
    /// Creates a new cache state instance with a given size.
    pub fn init(cache_size: usize) {
        let inst = if cache_size == 0 {
            Self { 0: None }
        } else {
            let store = Arc::new(ARCacheBuilder::new()
                .set_size(cache_size, 12)
                .build()
                .unwrap()
            );
            Self { 0: Some(store) }
        };

        let _ = CACHE_STATE.set(inst);
pub fn new_cache(cfg: CacheConfig) -> anyhow::Result<Option<Cache>> {
    if cfg.max_capacity.is_some() && cfg.max_images.is_some() {
        return Err(anyhow!("Cache must be *either* based off of number of images or amount of memory, not both."))
    } else if cfg.max_capacity.is_none() && cfg.max_images.is_none() {
        return Ok(None)
    }

    /// Gets an item from the cache if it exists, otherwise returns None.
    pub fn get(&self, file_id: Uuid, preset: String, format: ImageFormat) -> Option<BytesMut> {
        let state = self.0.as_ref()?;
        let ref_val = (file_id, preset, format);
        let mut target = state.read();
        target.get(&ref_val).map(|v| v.clone())
    let mut cache = moka::sync::CacheBuilder::default();
    if let Some(max_items) = cfg.max_images {
        cache = cache.max_capacity(max_items as u64)
    }

    /// Adds an item to the cache; if the cache is already at its limit
    /// the least recently used (LRU) item is removed.
    pub fn set(&self, file_id: Uuid, preset: String, format: ImageFormat, data: BytesMut) {
        if let Some(state) = self.0.as_ref() {
            let ref_val = (file_id, preset, format);
            let mut target = state.write();
            target.insert(ref_val, data);
            target.commit();
        }
    if let Some(max_memory) = cfg.max_capacity {
        cache = cache
            .weigher(|k: &String, v: &Bytes| (k.len() + v.len()) as u32)
            .max_capacity((max_memory * 1024 * 1024) as u64);
    }

    Ok(Some(cache.build().into()))
}

pub fn init_cache(cfg: CacheConfig) -> anyhow::Result<()> {
    if let Some(cache) = new_cache(cfg)? {
        let _ = GLOBAL_CACHE.set(cache);
    };
    Ok(())
}

pub fn global_cache<'a>() -> Option<&'a Cache> {
    GLOBAL_CACHE.get()
}

pub struct Cache {
    inner: moka::sync::Cache<String, Bytes>,
}

impl Deref for Cache {
    type Target = moka::sync::Cache<String, Bytes>;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl From<moka::sync::Cache<String, Bytes>> for Cache {
    fn from(v: moka::sync::Cache<String, Bytes>) -> Self {
        Self {
            inner: v
        }
    }
}
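
// Illustrative sketch of the new moka-backed cache above (the key string is
// hypothetical): when `max_capacity` is set, entries are weighed by key
// length + value length, so eviction kicks in once the combined size of the
// cached entries exceeds the configured memory cap.
//
//     let cache = new_cache(CacheConfig {
//         max_images: None,
//         max_capacity: Some(64), // ~64 MB
//     })?.expect("a capacity was given, so a cache is built");
//     cache.insert("7:0:some-image-id:webp".to_string(), Bytes::new());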

@@ -0,0 +1,422 @@
use std::collections::HashMap;
use std::path::Path;
use anyhow::{anyhow, Result};
use image::ImageFormat;
use image::imageops::FilterType;
use once_cell::sync::OnceCell;
use serde::Deserialize;
use poem_openapi::Enum;
use crate::pipelines::ProcessingMode;

use crate::storage::backends::BackendConfigs;

static CONFIG: OnceCell<RuntimeConfig> = OnceCell::new();

pub fn config() -> &'static RuntimeConfig {
    CONFIG.get().expect("config init")
}

#[cfg(test)]
pub fn init_test(data: &str) -> Result<()> {
    let cfg: RuntimeConfig = serde_yaml::from_str(data)?;
    dbg!(&cfg); // Useful for failed test debugging
    let _ = CONFIG.set(cfg);
    Ok(())
}

pub async fn init(config_file: &Path) -> Result<()> {
    let file = tokio::fs::read(config_file).await?;

    if let Some(ext) = config_file.extension() {
        let ext = ext.to_string_lossy().to_string();
        let cfg: RuntimeConfig = match ext.as_str() {
            "json" => serde_json::from_slice(&file)?,
            "yaml" => serde_yaml::from_slice(&file)?,
            "yml" => serde_yaml::from_slice(&file)?,
            _ => return Err(anyhow!("Config file must have an extension of either `.json`, `.yaml` or `.yml`"))
        };

        validate(&cfg)?;
        let _ = CONFIG.set(cfg);
        Ok(())
    } else {
        Err(anyhow!("Config file must have an extension of either `.json`, `.yaml` or `.yml`"))
    }
}
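
// Illustrative sketch: the loader dispatches purely on the file extension,
// so `config.yaml`, `config.yml` and `config.json` are all accepted while
// anything else is rejected up front:
//
//     // config::init(Path::new("config.toml")).await -> Err(..)
//     // config::init(Path::new("config.yaml")).await -> parsed as YAML
//     // config::init(Path::new("config.json")).await -> parsed as JSON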


fn validate(cfg: &RuntimeConfig) -> Result<()> {
    for (name, cfg) in cfg.buckets.iter() {
        if !cfg.formats.png
            && !cfg.formats.jpeg
            && !cfg.formats.gif
            && !cfg.formats.webp
        {
            return Err(anyhow!("Bucket {} is invalid: At least one encoding format must be enabled.", name))
        }

        if let Some(ref def) = cfg.default_serving_preset {
            if !cfg.presets.contains_key(def) {
                return Err(anyhow!("Bucket {} is invalid: Default serving preset does not exist.", name))
            }
        }

        if let Some(default_format) = cfg.default_serving_format {
            if !cfg.formats.is_enabled(default_format) {
                return Err(anyhow!("Bucket {} is invalid: Default serving format is not an enabled encoding format.", name))
            }
        }

        if cfg.presets.keys().any(|v| v == "original") {
            return Err(anyhow!("Bucket {} is invalid: The `original` preset name is reserved.", name))
        }
    }

    Ok(())
}


#[derive(Debug, Deserialize)]
pub struct RuntimeConfig {
    /// The set storage backend configuration.
    pub backend: BackendConfigs,

    /// A set of bucket configs.
    ///
    /// Each bucket represents a category.
    pub buckets: HashMap<String, BucketConfig>,

    /// The base path to serve images from.
    ///
    /// Defaults to `/`.
    pub base_serving_path: Option<String>,

    /// The global cache handler.
    ///
    /// This will be the fallback handler if any buckets are not
    /// assigned a dedicated cache config.
    ///
    /// If this is `None` then no caching is performed.
    pub global_cache: Option<CacheConfig>,

    /// The *global* max upload size allowed in KB.
    ///
    /// This takes precedence over bucket level limits.
    pub max_upload_size: Option<usize>,

    /// The global max concurrency.
    ///
    /// This takes precedence over bucket level limits.
    pub max_concurrency: Option<usize>,
}

impl RuntimeConfig {
    #[inline]
    pub fn valid_global_size(&self, size: usize) -> bool {
        self
            .max_upload_size
            .map(|limit| size <= (limit * 1024))
            .unwrap_or(true)
    }
}

#[derive(Copy, Clone, Debug, Deserialize)]
pub struct CacheConfig {
    /// The maximum amount of images to cache.
    ///
    /// If set to `None` then this will fall back to capacity
    /// based caching.
    ///
    /// If both entries are `None` then the item is not cached.
    pub max_images: Option<u16>,

    /// The maximum amount of memory (approximately) in MB.
    ///
    /// If set to `None` then this will fall back to
    /// number of entries based caching.
    ///
    /// If both entries are `None` then the item is not cached.
    pub max_capacity: Option<u32>,
}

#[derive(Clone, Debug, Deserialize)]
pub struct BucketConfig {
    #[serde(default)]
    /// The processing mode for the given bucket.
    ///
    /// See `config::ProcessingMode` for more.
    pub mode: ProcessingMode,

    /// The given image format optimisation config.
    pub formats: ImageFormats,

    /// The default format to serve images as.
    ///
    /// Defaults to the first enabled encoding format.
    pub default_serving_format: Option<ImageKind>,

    /// The default resizing preset to serve images as.
    ///
    /// Defaults to the original image size.
    pub default_serving_preset: Option<String>,

    #[serde(default)]
    /// A set of resizing presets, this allows resizing dimensions to be accessed
    /// via a name. E.g. "small", "medium", "large", etc...
    pub presets: HashMap<String, ResizingConfig>,

    /// A local cache config.
    ///
    /// If `None` this will use the global handler.
    pub cache: Option<CacheConfig>,

    /// The max upload size allowed for this bucket in KB.
    pub max_upload_size: Option<u32>,

    /// The per-bucket max concurrency.
    pub max_concurrency: Option<usize>,
}

impl BucketConfig {
    #[inline]
    pub fn sizing_preset_ids(&self) -> Vec<u32> {
        self.presets.keys()
            .map(crate::utils::crc_hash)
            .collect()
    }
}

#[derive(Copy, Clone, Debug, Enum, Eq, PartialEq, Deserialize, strum::AsRefStr)]
#[oai(rename_all = "lowercase")]
#[serde(rename_all = "lowercase")]
pub enum ImageKind {
    /// The PNG encoding format.
    Png,

    /// The JPEG encoding format.
    Jpeg,

    /// The WebP encoding format.
    Webp,

    /// The GIF encoding format.
    Gif,
}

#[allow(clippy::from_over_into)]
impl Into<image::ImageFormat> for ImageKind {
    fn into(self) -> ImageFormat {
        match self {
            Self::Png => image::ImageFormat::Png,
            Self::Jpeg => image::ImageFormat::Jpeg,
            Self::Gif => image::ImageFormat::Gif,
            Self::Webp => image::ImageFormat::WebP,
        }
    }
}

impl ImageKind {
    pub fn from_content_type(kind: &str) -> Option<Self> {
        match kind {
            "image/png" => Some(Self::Png),
            "image/jpeg" => Some(Self::Jpeg),
            "image/gif" => Some(Self::Gif),
            "image/webp" => Some(Self::Webp),
            "png" => Some(Self::Png),
            "jpeg" => Some(Self::Jpeg),
            "gif" => Some(Self::Gif),
            "webp" => Some(Self::Webp),
            _ => None
        }
    }

    pub fn from_guessed_format(fmt: image::ImageFormat) -> Option<Self> {
        match fmt {
            image::ImageFormat::Png => Some(Self::Png),
            image::ImageFormat::Jpeg => Some(Self::Jpeg),
            image::ImageFormat::Gif => Some(Self::Gif),
            image::ImageFormat::WebP => Some(Self::Webp),
            _ => None
        }
    }

    pub fn as_content_type(&self) -> String {
        format!("image/{}", self.as_file_extension())
    }

    pub fn as_file_extension(&self) -> &'static str {
        match self {
            ImageKind::Png => "png",
            ImageKind::Jpeg => "jpeg",
            ImageKind::Webp => "webp",
            ImageKind::Gif => "gif",
        }
    }

    pub fn variants() -> &'static [Self] {
        &[
            Self::Png,
            Self::Jpeg,
            Self::Gif,
            Self::Webp,
        ]
    }
}
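
// Illustrative sketch: `from_content_type` accepts both full MIME types and
// bare extension names, and `as_content_type` maps back to the MIME form:
//
//     assert_eq!(ImageKind::from_content_type("image/webp"), Some(ImageKind::Webp));
//     assert_eq!(ImageKind::from_content_type("webp"), Some(ImageKind::Webp));
//     assert_eq!(ImageKind::Webp.as_content_type(), "image/webp");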


#[derive(Copy, Clone, Debug, Deserialize)]
pub struct ImageFormats {
    #[serde(default = "default_true")]
    /// Enable PNG re-encoding.
    ///
    /// Defaults to `true`.
    pub png: bool,

    #[serde(default = "default_true")]
    /// Enable JPEG re-encoding.
    ///
    /// Defaults to `true`.
    pub jpeg: bool,

    #[serde(default = "default_true")]
    /// Enable WebP re-encoding.
    ///
    /// Defaults to `true`.
    pub webp: bool,

    #[serde(default)]
    /// Enable gif re-encoding.
    ///
    /// This is generally quite a slow encoder and not
    /// recommended for most buckets.
    ///
    /// Defaults to `false`.
    pub gif: bool,

    #[serde(default)]
    /// The (optional) webp encoder config.
    ///
    /// This is used for fine-tuning the webp encoder for a desired size and
    /// performance behaviour.
    pub webp_config: WebpConfig,

    #[serde(default = "default_original_format")]
    /// The format to encode and store the original image as.
    ///
    /// This is only used for the JIT and Realtime processing modes
    /// and will default to PNG encoding if empty.
    pub original_image_store_format: ImageKind,
}

impl ImageFormats {
    pub fn is_enabled(&self, kind: ImageKind) -> bool {
        match kind {
            ImageKind::Png => self.png,
            ImageKind::Jpeg => self.jpeg,
            ImageKind::Webp => self.webp,
            ImageKind::Gif => self.gif,
        }
    }

    pub fn first_enabled_format(&self) -> ImageKind {
        if self.png {
            return ImageKind::Png
        }

        if self.jpeg {
            return ImageKind::Jpeg
        }

        if self.webp {
            return ImageKind::Webp
        }

        if self.gif {
            return ImageKind::Gif
        }

        panic!("Invalid configuration, expected at least one enabled format.")
    }
}

#[derive(Copy, Clone, Debug, Default, Deserialize)]
pub struct WebpConfig {
    /// The ratio of lossy compression for webp images
    /// from 0.0 to 100.0 inclusive for minimal and maximal quality respectively.
    ///
    /// This can be set to null to put the encoder into lossless compression mode.
    pub quality: Option<f32>,

    /// With lossless encoding this is the ratio of compression to speed.
    /// If using lossy encoding this does nothing (float: 0.0 - 100.0 inclusive).
    pub compression: Option<f32>,

    /// The quality/speed trade-off (0=fast, 6=slower-better)
    pub method: Option<u8>,

    #[serde(default)]
    /// A bool signalling if multi-threaded encoding should be attempted.
    pub threading: bool,
}

#[derive(Copy, Clone, Debug, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ResizingFilter {
    /// Nearest Neighbor
    Nearest,

    /// Linear Filter
    Triangle,

    /// Cubic Filter
    CatmullRom,

    /// Gaussian Filter
    Gaussian,

    /// Lanczos with window 3
    Lanczos3,
}

#[allow(clippy::from_over_into)]
impl Into<image::imageops::FilterType> for ResizingFilter {
    fn into(self) -> FilterType {
        match self {
            ResizingFilter::Nearest => FilterType::Nearest,
            ResizingFilter::Triangle => FilterType::Triangle,
            ResizingFilter::CatmullRom => FilterType::CatmullRom,
            ResizingFilter::Gaussian => FilterType::Gaussian,
            ResizingFilter::Lanczos3 => FilterType::Lanczos3,
        }
    }
}

impl Default for ResizingFilter {
    fn default() -> Self {
        Self::Nearest
    }
}

#[derive(Copy, Clone, Debug, Default, Deserialize)]
pub struct ResizingConfig {
    /// The width to resize the image to.
    pub width: u32,

    /// The height to resize the image to.
    pub height: u32,

    #[serde(default)]
    /// The resizing filter algorithm to use.
    ///
    /// Defaults to nearest neighbour.
    pub filter: ResizingFilter,
}

const fn default_true() -> bool {
    true
}

const fn default_original_format() -> ImageKind {
    ImageKind::Png
}

src/configure.rs
@@ -1,147 +0,0 @@
use std::fs::read_to_string;
use std::sync::Arc;

use gotham_derive::StateData;
use hashbrown::HashMap;
use serde::{Deserialize, Serialize};

use crate::image::ImageFormat;
use crate::storage::DatabaseBackend;

/// The size of the pages when listing indexes via the admin panel.
pub const PAGE_SIZE: i64 = 50;

/// A cheaply cloneable version of the given configuration
/// for shared state middleware.
#[derive(Clone, StateData)]
pub struct StateConfig(pub Arc<Config>);

#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum LogLevel {
    Off,
    Info,
    Debug,
    Error,
}

/// A given size of a preset.
/// Any uploaded images will be automatically duplicated and resized in this
/// preset.
#[derive(Deserialize)]
pub struct SizingPreset {
    pub width: u32,
    pub height: u32,
}

#[derive(Deserialize)]
pub struct Config {
    pub log_level: LogLevel,
    pub host: String,
    pub port: u16,
    pub base_data_path: String,
    pub formats: HashMap<ImageFormat, bool>,
    pub database_backend: DatabaseBackend,
    pub size_presets: HashMap<String, SizingPreset>,
    pub default_serving_preset: String,
    pub default_serving_format: ImageFormat,
    pub webp_quality: Option<f32>,
    pub webp_compression: Option<f32>,
    pub webp_method: Option<u8>,
    pub webp_threading: Option<bool>,
    pub cache_size: usize,
}

impl Config {
    pub fn from_file(file: &str) -> anyhow::Result<Self> {
        let data = read_to_string(file)?;
        Ok(serde_json::from_str::<Self>(&data)?)
    }

    pub fn template(backend: &str) -> anyhow::Result<serde_json::Value> {
        let config = match backend.to_lowercase().as_str() {
            "redis" => json!({
                "type": "redis",
                "config": {
                    "connection_uri": "redis://user:pass@localhost/0",
                    "pool_size": 12,
                }
            }),
            "cassandra" => json!({
                "type": "cassandra",
                "config": {
                    "clusters": [
                        "ip:port",
                        "ip:port",
                        "ip:port",
                    ],
                    "keyspace": {
                        "strategy": "SimpleStrategy",
                        "spec": {
                            "replication_factor": 3
                        }
                    },
                    "user": "",
                    "password": "",
                }
            }),
            "postgres" => json!({
                "type": "postgres",
                "config": {
                    "connection_uri": "postgres://user:pass@localhost/foo",
                    "pool_size": 10,
                }
            }),
            "mysql" => json!({
                "type": "mysql",
                "config": {
                    "connection_uri": "mysql://user:pass@localhost/foo",
                    "pool_size": 10,
                }
            }),
            "sqlite" => json!({
                "type": "sqlite",
                "config": {
                    "connection_uri": "sqlite://database.db",
                    "pool_size": 10,
                }
            }),
            _ => return Err(anyhow::Error::msg("invalid database backend given")),
        };

        Ok(json!({
            "log_level": LogLevel::Info,
            "host": "127.0.0.1",
            "port": 7070,
            "base_data_path": "/images",
            "formats": {
                "png": true,
                "jpeg": true,
                "gif": false,
                "webp": true,
            },
            "database_backend": config,
            "size_presets": {
                "small": {
                    "width": 32,
                    "height": 32,
                },
                "medium": {
                    "width": 64,
                    "height": 64,
                },
                "large": {
                    "width": 128,
                    "height": 128,
                },
            },
            "default_serving_preset": "original",
            "default_serving_format": "webp",
            "webp_quality": None::<f32>,
            "webp_compression": Some(50),
            "webp_method": Some(4),
            "webp_threading": Some(true),
            "cache_size": 500,
        }))
    }
}

@@ -1,58 +0,0 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use uuid::Uuid;

/// A set of filters that can be used to view
/// entities via the REST API on the admin panel.
///
/// Example:
///
/// ```json
/// {
///     "filter": {
///         "filter_type": "category",
///         "with_value": "cats",
///     }
/// }
/// ```
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase", tag = "filter_type", content = "with_value")]
pub enum FilterType {
    All,
    Category(String),
    CreationDate(DateTime<Utc>),
}

/// How the data should be ordered when requesting the
/// index list.
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum OrderBy {
    CreationDate,
    TotalSize,
}

impl OrderBy {
    pub fn as_str(&self) -> &str {
        match self {
            OrderBy::CreationDate => "insert_date",
            OrderBy::TotalSize => "total_size",
        }
    }
}

/// A result when listing all items in the server.
#[derive(Serialize)]
pub struct IndexResult {
    pub file_id: Uuid,
    pub category: String,
    pub total_size: i32,
    pub created_on: DateTime<Utc>,
}

#[derive(Deserialize)]
pub struct FilesListPayload {
    pub filter: Option<FilterType>,
    pub order: Option<OrderBy>,
    pub page: Option<usize>,
}

@@ -0,0 +1,317 @@
use std::hash::Hash;
use std::sync::Arc;
use bytes::Bytes;
use once_cell::sync::OnceCell;
use uuid::Uuid;
use poem_openapi::Object;
use tokio::sync::{Semaphore, SemaphorePermit};
use tokio::time::Instant;
use crate::cache::{Cache, global_cache};

use crate::config::{BucketConfig, ImageKind};
use crate::pipelines::{PipelineController, ProcessingMode, StoreEntry};
use crate::storage::template::StorageBackend;

static BUCKETS: OnceCell<hashbrown::HashMap<u32, BucketController>> = OnceCell::new();

pub fn init_buckets(buckets: hashbrown::HashMap<u32, BucketController>) {
    let _ = BUCKETS.set(buckets);
}

pub fn get_bucket_by_id(bucket_id: u32) -> Option<&'static BucketController> {
    BUCKETS.get_or_init(hashbrown::HashMap::new).get(&bucket_id)
}

pub fn get_bucket_by_name(bucket: impl Hash) -> Option<&'static BucketController> {
    let bucket_id = crate::utils::crc_hash(bucket);
    get_bucket_by_id(bucket_id)
}
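
// Illustrative sketch (assumes `crate::utils::crc_hash` is a stable
// CRC32-based hash, as the crc32fast dependency suggests): bucket names are
// reduced to fixed u32 ids, so lookups by name and by id are interchangeable
// once `init_buckets` has run:
//
//     // let id = crate::utils::crc_hash("user-profiles");
//     // get_bucket_by_id(id) and get_bucket_by_name("user-profiles")
//     // then resolve to the same bucket controller.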

async fn get_optional_permit<'a>(
    global: &'a Option<Arc<Semaphore>>,
    local: &'a Option<Semaphore>,
) -> anyhow::Result<Option<SemaphorePermit<'a>>> {
    if let Some(limiter) = global {
        return Ok(Some(limiter.acquire().await?))
    }

    if let Some(limiter) = local {
        return Ok(Some(limiter.acquire().await?))
    }

    Ok(None)
}
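
// Illustrative sketch: the global limiter takes precedence over a bucket's
// local limiter, so a bucket-level `max_concurrency` only applies when no
// global limit is configured:
//
//     // global: Some(..), local: Some(..) -> permit taken from global only
//     // global: None,     local: Some(..) -> permit taken from local
//     // global: None,     local: None     -> Ok(None), i.e. unlimited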


#[derive(Object, Debug)]
pub struct ImageUploadInfo {
    /// The computed image sizing id.
    ///
    /// This is useful for tracking files outside of lust as this is
    /// generally used for filtering within the storage systems.
    sizing_id: u32,
}

#[derive(Object, Debug)]
pub struct UploadInfo {
    /// The generated ID for the file.
    ///
    /// This can be used to access the file for the given bucket.
    image_id: Uuid,

    /// The time spent processing the image in seconds.
    processing_time: f32,

    /// The crc32 checksum of the uploaded image.
    checksum: u32,

    /// The information that is specific to the image.
    images: Vec<ImageUploadInfo>,

    /// The id of the bucket the image was stored in.
    ///
    /// This is useful for tracking files outside of lust as this is
    /// generally used for filtering within the storage systems.
    bucket_id: u32,
}

pub struct BucketController {
    bucket_id: u32,
    cache: Option<Cache>,
    global_limiter: Option<Arc<Semaphore>>,
    config: BucketConfig,
    pipeline: PipelineController,
    storage: Arc<dyn StorageBackend>,
    limiter: Option<Semaphore>,
}

impl BucketController {
    pub fn new(
        bucket_id: u32,
        cache: Option<Cache>,
        global_limiter: Option<Arc<Semaphore>>,
        config: BucketConfig,
        pipeline: PipelineController,
        storage: Arc<dyn StorageBackend>,
    ) -> Self {
        Self {
            bucket_id,
            cache,
            global_limiter,
            limiter: config.max_concurrency.map(Semaphore::new),
            config,
            pipeline,
            storage,
        }
    }

    #[inline]
    pub fn cfg(&self) -> &BucketConfig {
        &self.config
    }

    pub async fn upload(&self, kind: ImageKind, data: Vec<u8>) -> anyhow::Result<UploadInfo> {
        debug!("Uploading image with kind: {:?}, {} bytes in size.", kind, data.len());

        let _permit = get_optional_permit(&self.global_limiter, &self.limiter).await?;
        let start = Instant::now();

        let checksum = crc32fast::hash(&data);
        let pipeline = self.pipeline.clone();
        let result = tokio::task::spawn_blocking(move || {
            pipeline.on_upload(kind, data)
        }).await??;

        let mut image_upload_info = vec![];
        let image_id = Uuid::new_v4();
        for store_entry in result.result.to_store {
            self.storage
                .store(
                    self.bucket_id,
                    image_id,
                    store_entry.kind,
                    store_entry.sizing_id,
                    store_entry.data.clone(),
                ).await?;

            image_upload_info.push(ImageUploadInfo { sizing_id: store_entry.sizing_id });
            if let Some(ref cache) = self.cache {
                let cache_key = self.cache_key(
                    store_entry.sizing_id,
                    image_id,
                    store_entry.kind,
                );

                cache.insert(cache_key, store_entry.data);
            }
        }

        Ok(UploadInfo {
            checksum,
            image_id,
            bucket_id: self.bucket_id,
            images: image_upload_info,
            processing_time: start.elapsed().as_secs_f32(),
        })
    }

    pub async fn fetch(
        &self,
        image_id: Uuid,
        desired_kind: ImageKind,
        size_preset: Option<String>,
        custom_sizing: Option<(u32, u32)>,
    ) -> anyhow::Result<Option<StoreEntry>> {
        debug!(
            "Fetching image with image_id: {}, desired_kind: {:?}, preset: {:?}, custom_sizing: {:?}.",
            image_id, desired_kind, &size_preset, &custom_sizing,
        );

        let _permit = get_optional_permit(&self.global_limiter, &self.limiter).await?;

        let sizing = size_preset
            .map(Some)
            .unwrap_or_else(|| self.config.default_serving_preset.clone());

        let sizing_id = if let Some(sizing_preset) = sizing {
            if sizing_preset == "original" {
                0
            } else {
                crate::utils::crc_hash(sizing_preset)
            }
        } else {
            0
        };

        // In realtime mode only the original image is stored, so fetch the
        // stored original format rather than the desired output format.
        let fetch_kind = if self.config.mode == ProcessingMode::Realtime {
            self.config.formats.original_image_store_format
        } else {
            desired_kind
        };

        let maybe_existing = self.caching_fetch(
            image_id,
            fetch_kind,
            if self.config.mode == ProcessingMode::Realtime { 0 } else { sizing_id },
        ).await?;

        let (data, retrieved_kind) = match maybe_existing {
            // If we're in JIT mode we want to re-encode the image and store it.
            None => if self.config.mode == ProcessingMode::Jit {
                let base_kind = self.config.formats.original_image_store_format;
                let value = self.caching_fetch(
                    image_id,
                    base_kind,
                    0,
                ).await?;

                match value {
                    None => return Ok(None),
                    Some(original) => (original, base_kind)
                }
            } else {
                return Ok(None)
            },
            Some(computed) => (computed, fetch_kind),
        };

        // Small optimisation here when in AOT mode to avoid
        // spawning additional threads.
        if self.config.mode == ProcessingMode::Aot {
            return Ok(Some(StoreEntry { data, kind: retrieved_kind, sizing_id }))
        }

        let pipeline = self.pipeline.clone();
        let result = tokio::task::spawn_blocking(move || {
            pipeline.on_fetch(desired_kind, retrieved_kind, data, sizing_id, custom_sizing)
        }).await??;

        let mut tasks = vec![];
        for store_entry in result.result.to_store {
            let storage = self.storage.clone();
            let bucket_id = self.bucket_id;
            let t = tokio::spawn(async move {
                storage.store(
                    bucket_id,
                    image_id,
                    store_entry.kind,
                    store_entry.sizing_id,
                    store_entry.data,
                ).await
            });

            tasks.push(t);
        }

        for task in tasks {
            task.await??;
        }

        Ok(result.result.response)
    }

    pub async fn delete(&self, image_id: Uuid) -> anyhow::Result<()> {
        debug!("Removing image {}", image_id);

        let _permit = get_optional_permit(&self.global_limiter, &self.limiter).await?;
        let purged_entities = self.storage.delete(self.bucket_id, image_id).await?;

        if let Some(ref cache) = self.cache {
            for (sizing_id, kind) in purged_entities {
                let cache_key = self.cache_key(sizing_id, image_id, kind);
                cache.invalidate(&cache_key);
            }
        }

        Ok(())
    }
}

impl BucketController {
    #[inline]
    fn cache_key(&self, sizing_id: u32, image_id: Uuid, kind: ImageKind) -> String {
        format!(
            "{bucket}:{sizing}:{image}:{kind}",
            bucket = self.bucket_id,
            sizing = sizing_id,
            image = image_id,
            kind = kind.as_file_extension(),
        )
    }
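
    // Illustrative sketch (the uuid below is made up): cache keys are flat
    // colon-separated strings. For bucket id 7, the "original" sizing (id 0)
    // and a webp entry, the key would look like:
    //
    //     "7:0:550e8400-e29b-41d4-a716-446655440000:webp"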

    async fn caching_fetch(
        &self,
        image_id: Uuid,
        fetch_kind: ImageKind,
        sizing_id: u32,
    ) -> anyhow::Result<Option<Bytes>> {
        let maybe_cache_backend = self.cache
            .as_ref()
            .map(Some)
            .unwrap_or_else(global_cache);

        let cache_key = self.cache_key(sizing_id, image_id, fetch_kind);

        if let Some(cache) = maybe_cache_backend {
            if let Some(buffer) = cache.get(&cache_key) {
                return Ok(Some(buffer))
            }
        }

        let maybe_existing = self.storage.fetch(
            self.bucket_id,
            image_id,
            fetch_kind,
            sizing_id
        ).await?;

        if let Some(cache) = maybe_cache_backend {
            if let Some(ref buffer) = maybe_existing {
                cache.insert(cache_key, buffer.clone());
            }
        }

        Ok(maybe_existing)
    }
}

src/image.rs
@@ -1,241 +0,0 @@
use std::sync::Arc;
use std::time::Instant;

use anyhow::Result;
use bytes::{BufMut, BytesMut};
use gotham::state::{FromState, State};
use gotham_derive::{StateData, StaticResponseExtender};
use hashbrown::HashMap;
use image::{imageops, load_from_memory_with_format, DynamicImage};
use log::{debug, error};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use webp::Encoder;

use crate::configure::StateConfig;
use crate::storage::StorageBackend;
use crate::traits::ImageStore;

pub type ImageData = HashMap<ImageFormat, BytesMut>;
pub type ImagePresetsData = HashMap<String, ImageData>;

pub type ImageDataSizes = HashMap<ImageFormat, usize>;
pub type ImagePresetDataSizes = HashMap<String, ImageDataSizes>;

#[derive(Debug, Clone, Ord, PartialOrd, Hash, Eq, PartialEq, Serialize, Deserialize, Copy)]
#[serde(rename_all = "lowercase")]
pub enum ImageFormat {
    Png,
    Jpeg,
    Gif,
    WebP,
}

#[derive(Deserialize, StateData, StaticResponseExtender)]
pub struct ImageGet {
    pub format: Option<ImageFormat>,
    pub encode: Option<bool>,
    pub preset: Option<String>,
}

#[derive(Deserialize)]
pub struct ImageUpload {
    pub format: ImageFormat,
    pub data: String,
    pub category: Option<String>,
}

#[derive(Serialize)]
pub struct ImageUploaded {
    pub file_id: Uuid,
    pub formats: ImagePresetDataSizes,
    pub category: String,
}

#[derive(Deserialize, StateData, StaticResponseExtender)]
pub struct ImageRemove {
    pub file_id: Uuid,
}

macro_rules! convert {
    ( $e:expr, $d:expr ) => {{
        || -> anyhow::Result<BytesMut> {
            let buff = BytesMut::new();
            let mut writer = buff.writer();
            let start = Instant::now();
            $e.write_to(&mut writer, $d)?;
            debug!("format {:?} conversion took {:?}", $d, start.elapsed());
            Ok(writer.into_inner())
        }()
    }};
}

macro_rules! generate {
    ( $n:expr, $e:expr, $hm1:expr, $hm2:expr, $cfg:expr ) => ({
        let (data, sizes) = convert_image($e, $cfg).await?;
        $hm1.insert($n.to_string(), sizes);
        $hm2.insert($n.to_string(), data);
    })
}

macro_rules! is_enabled {
    ( $format:expr, $options:expr ) => {{
        $options.get(&$format).map(|v| *v).unwrap_or(true)
    }};
}

macro_rules! log_err {
    ( $result:expr, $msg:expr ) => {{
        match &$result {
            Ok(_) => (),
            Err(e) => error!("{} {:?}", $msg, e),
        };

        $result
    }};
}

fn spawn_conversion(
    img: Arc<DynamicImage>,
    format: ImageFormat,
    convert_to_format: image::ImageFormat,
) -> Result<(ImageFormat, BytesMut)> {
    let img: BytesMut = log_err!(
        convert!(img, convert_to_format),
        format!("failed to convert {:?}: ", convert_to_format)
    )?;

    return Ok((format, img));
}

async fn convert_image(
    img: Arc<DynamicImage>,
    cfg: StateConfig,
) -> Result<(ImageData, ImageDataSizes)> {
    let mut resulting_sizes = HashMap::with_capacity(4);
    let mut resulting_data = HashMap::with_capacity(4);

    let mut handles = vec![];

    if is_enabled!(ImageFormat::Png, cfg.0.formats) {
        let cloned = img.clone();
        let handle = tokio::task::spawn_blocking(move || {
            spawn_conversion(cloned, ImageFormat::Png, image::ImageFormat::Png)
        });
        handles.push(handle);
    }

    if is_enabled!(ImageFormat::Jpeg, cfg.0.formats) {
        let cloned = img.clone();
        let handle = tokio::task::spawn_blocking(move || {
            spawn_conversion(cloned, ImageFormat::Jpeg, image::ImageFormat::Jpeg)
        });
        handles.push(handle);
    }

    if is_enabled!(ImageFormat::Gif, cfg.0.formats) {
        let cloned = img.clone();
        let handle = tokio::task::spawn_blocking(move || {
            spawn_conversion(cloned, ImageFormat::Gif, image::ImageFormat::Gif)
        });
        handles.push(handle);
    }

    // This is the slowest conversion, maybe change??
    // Updated: The new encoder allows for multi-threaded encoding.
    if is_enabled!(ImageFormat::WebP, cfg.0.formats) {
        let cloned = img.clone();
        let handle = tokio::task::spawn_blocking(move || -> Result<(ImageFormat, BytesMut)> {
            let start = Instant::now();
            let raw = Encoder::from_image(cloned.as_ref()).encode();
            debug!(
                "format {:?} conversion took {:?}",
                image::ImageFormat::WebP,
                start.elapsed()
            );
            let webp = BytesMut::from(raw.as_ref());

            Ok((ImageFormat::WebP, webp))
        });
        handles.push(handle);
    }

    for handle in handles {
        let (format, data) = handle.await??;
        resulting_sizes.insert(format, data.len());
        resulting_data.insert(format, data);
    }

    Ok((resulting_data, resulting_sizes))
}

pub async fn process_new_image(
    state: &mut State,
    category: &str,
    format: ImageFormat,
    data: Vec<u8>,
) -> Result<(Uuid, ImagePresetDataSizes)> {
    let cfg = StateConfig::take_from(state);
    let storage = StorageBackend::take_from(state);

    let fmt = match format {
        ImageFormat::Png => image::ImageFormat::Png,
        ImageFormat::Jpeg => image::ImageFormat::Jpeg,
        ImageFormat::Gif => image::ImageFormat::Gif,
        ImageFormat::WebP => image::ImageFormat::WebP,
    };

    let presets = &cfg.0.size_presets;
    let mut converted_sizes = HashMap::with_capacity(presets.len());
    let mut converted_data = HashMap::with_capacity(presets.len());
    let original = Arc::from(log_err!(
        load_from_memory_with_format(&data, fmt),
        "failed to load format due to exception: "
    )?);
    generate!(
        "original",
        original.clone(),
        converted_sizes,
        converted_data,
        cfg.clone()
    );

    for (preset_name, size) in presets {
        let cloned = original.clone();
        let im = Arc::new(cloned.resize(size.width, size.height, imageops::FilterType::Nearest));

        generate!(
            preset_name,
            im,
            converted_sizes,
            converted_data,
            cfg.clone()
        );
    }

    let file_id = Uuid::new_v4();
    storage.add_image(file_id, category, converted_data).await?;

    Ok((file_id, converted_sizes))
}

pub async fn get_image(
    state: &mut State,
    file_id: Uuid,
    preset: String,
    category: &str,
    format: ImageFormat,
) -> Option<BytesMut> {
    let storage = StorageBackend::take_from(state);
    storage.get_image(file_id, preset, category, format).await
}

pub async fn delete_image(state: &mut State, file_id: Uuid) -> Result<()> {
    let storage = StorageBackend::take_from(state);
    let cfg = StateConfig::take_from(state);

    let presets = cfg.0.size_presets.keys().collect();
    storage.remove_image(file_id, presets).await?;

    Ok(())
}

src/main.rs
@@ -1,250 +1,246 @@
mod backends;
|
||||
mod cache;
|
||||
mod configure;
|
||||
mod context;
|
||||
mod image;
|
||||
mod response;
|
||||
mod routes;
|
||||
mod config;
|
||||
mod storage;
|
||||
mod traits;
|
||||
mod routes;
|
||||
mod pipelines;
|
||||
mod controller;
|
||||
mod utils;
|
||||
mod processor;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
mod cache;
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use anyhow::{anyhow, Result};
|
||||
use clap::Parser;
|
||||
use mimalloc::MiMalloc;
|
||||
use poem::listener::TcpListener;
|
||||
use poem::{Endpoint, EndpointExt, IntoResponse, Request, Response, Route, Server};
|
||||
use poem_openapi::OpenApiService;
|
||||
use tokio::sync::Semaphore;
|
||||
use tracing::Level;
|
||||
use crate::controller::BucketController;
|
||||
use crate::storage::template::StorageBackend;
|
||||
|
||||
#[global_allocator]
|
||||
static GLOBAL: MiMalloc = MiMalloc;
|
||||
|
||||
#[macro_use]
|
||||
extern crate serde_json;
|
||||
extern crate tracing;
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Result;
|
||||
use clap::{App, Arg, ArgMatches, SubCommand};
|
||||
use gotham::middleware::logger::SimpleLogger as GothSimpleLogger;
|
||||
use gotham::middleware::state::StateMiddleware;
|
||||
use gotham::pipeline::new_pipeline;
|
||||
use gotham::pipeline::single::single_pipeline;
|
||||
use gotham::router::builder::{build_router, DefineSingleRoute, DrawRoutes};
|
||||
use gotham::router::Router;
|
||||
use gotham_derive::{StateData, StaticResponseExtender};
|
||||
use log::{info, LevelFilter};
|
||||
use serde::Deserialize;
|
||||
use tokio::fs;
|
||||
use uuid::Uuid;
|
||||
#[derive(Debug, Parser)]
|
||||
#[clap(author, version, about)]
|
||||
pub struct ServerConfig {
|
||||
#[clap(short, long, env, default_value = "127.0.0.1")]
|
||||
/// The binding host address of the server.
|
||||
pub host: String,
|
||||
|
||||
use crate::configure::{LogLevel, StateConfig};
|
||||
use crate::image::{ImageFormat, ImageGet, ImageRemove};
|
||||
use crate::storage::{DatabaseBackend, StorageBackend};
|
||||
use crate::traits::DatabaseLinker;
|
||||
#[clap(short, long, env, default_value = "8000")]
|
||||
pub port: u16,
|
||||
|
||||
/// A regex string for validating uuids in the request path.
|
||||
static UUID_REGEX: &str =
|
||||
"[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$";
|
||||
#[clap(short, long, env)]
|
||||
/// The external URL that would be used to access the server if applicable.
|
||||
///
|
||||
/// This only affects the documentation.
|
||||
pub docs_url: Option<String>,
|
||||
|
||||
/// A regex for separating out the category sections
|
||||
static CATEGORY_REGEX: &str = "[a-zA-Z0-9]+";
|
||||
#[clap(long, env, default_value = "info")]
|
||||
pub log_level: Level,
|
||||
|
||||
/// A simple extractor for taking the file_id out of the path
|
||||
/// of the request as a UUID.
|
||||
#[derive(Deserialize, StateData, StaticResponseExtender)]
|
||||
struct PathExtractor {
|
||||
category: Option<String>,
|
||||
file_id: Uuid,
|
||||
#[clap(long, env)]
|
||||
/// The file path to a given config file.
|
||||
///
|
||||
/// This can be either a JSON formatted config or YAML.
|
||||
pub config_file: PathBuf,
|
||||
}
|
||||
|
||||
/// Constructs all the routes for the server.
|
||||
fn router(backend: storage::StorageBackend, config: StateConfig) -> Result<Router> {
|
||||
let base = config.0.base_data_path.clone();
|
||||
|
||||
let cache_size = config.0.cache_size;
|
||||
cache::CacheState::init(cache_size);
|
||||
|
||||
let pipeline = new_pipeline()
|
||||
.add(GothSimpleLogger::new(log::Level::Info))
|
||||
.add(StateMiddleware::new(backend))
|
||||
.add(StateMiddleware::new(config))
|
||||
.build();
|
||||
let (chain, pipelines) = single_pipeline(pipeline);
|
||||
|
||||
Ok(build_router(chain, pipelines, |route| {
|
||||
route
|
||||
.get(&format!("{}/:file_id:{}", base, UUID_REGEX))
|
||||
.with_path_extractor::<PathExtractor>()
|
||||
.with_query_string_extractor::<ImageGet>()
|
||||
.to_async(routes::get_file);
|
||||
|
||||
route
|
||||
.get(&format!(
|
||||
"{}/:category:{}/:file_id:{}",
|
||||
base, CATEGORY_REGEX, UUID_REGEX
|
||||
))
|
||||
.with_path_extractor::<PathExtractor>()
|
||||
.with_query_string_extractor::<ImageGet>()
|
||||
.to_async(routes::get_file);
|
||||
|
||||
route.post("admin/create/image").to_async(routes::add_file);
|
||||
route
|
||||
.delete(&format!("admin/delete/image/:file_id:{}", UUID_REGEX))
|
||||
.with_path_extractor::<ImageRemove>()
|
||||
.to_async(routes::remove_file);
|
||||
|
||||
route.post("admin/list").to_async(routes::list_files);
|
||||
}))
|
||||
}
|
||||
|
||||
/// This will initialise the logger as well as
|
||||
/// start server and parse args (although not in that order).
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
let cli_args = parse_args();
|
||||
let (name, args) = cli_args.subcommand();
|
||||
match name {
|
||||
"init" => run_init(args.unwrap()).await,
|
||||
"run" => run_server(args.unwrap()).await,
|
||||
other => {
|
||||
return Err(anyhow::Error::msg(format!(
|
||||
"command {} is not supported, only commands (init, run) are supported",
|
||||
other,
|
||||
)))
|
||||
},
|
||||
}?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_init(args: &ArgMatches<'_>) -> Result<()> {
|
||||
let target_backend = args.value_of("backend").expect("backend value not given");
|
||||
|
||||
let example = configure::Config::template(target_backend)?;
|
||||
let out = serde_json::to_string_pretty(&example)?;
|
||||
fs::write("./config.json", out).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_server(args: &ArgMatches<'_>) -> Result<()> {
    let cfg = if let Some(cfg) = args.value_of("config") {
        configure::Config::from_file(cfg)
    } else {
        return Err(anyhow::Error::msg(
            "missing required config file, exiting...",
        ));
    }?;

    let (goth_lvl, lust_lvl) = match cfg.log_level {
        LogLevel::Off => (LevelFilter::Off, LevelFilter::Off),
        LogLevel::Info => (LevelFilter::Info, LevelFilter::Info),
        LogLevel::Debug => (LevelFilter::Info, LevelFilter::Debug),
        LogLevel::Error => (LevelFilter::Error, LevelFilter::Error),
    };

    if std::env::var_os("RUST_LOG").is_none() {
        std::env::set_var("RUST_LOG", format!("warn,lust={},gotham={}", lust_lvl, goth_lvl));
    }

    pretty_env_logger::init();

    let lossless = cfg.webp_quality.is_none();
    let quality = if lossless {
        cfg.webp_compression.unwrap_or(50f32)
    } else {
        cfg.webp_quality.unwrap()
    };
    let threading = cfg.webp_threading.unwrap_or(true);
    let method = cfg.webp_method.unwrap_or(4) as i32;

    info!(
        "setting up webp state. \
        Lossless: {}, \
        Quality: {}, \
        Method: {}, \
        Threading: {}",
        lossless, quality, method, threading
    );
    webp::init_global(lossless, quality, method, threading);

    let fields: Vec<ImageFormat> = cfg
        .formats
        .iter()
        .filter_map(
            |(format, enabled)| {
                if *enabled {
                    Some(*format)
                } else {
                    None
                }
            })
        .collect();

    let mut presets: Vec<&str> = cfg.size_presets.keys().map(|v| v.as_str()).collect();
    presets.push("original");

    let backend: StorageBackend = match cfg.database_backend.clone() {
        DatabaseBackend::Redis(db_cfg) => {
            let mut db = backends::redis::Backend::connect(db_cfg).await?;
            db.ensure_tables(presets, fields).await?;
            let _ = storage::REDIS.set(db);
            StorageBackend::Redis
        },
        DatabaseBackend::Cassandra(db_cfg) => {
            let mut db = backends::cql::Backend::connect(db_cfg).await?;
            db.ensure_tables(presets, fields).await?;
            let _ = storage::CASSANDRA.set(db);
            StorageBackend::Cassandra
        },
        DatabaseBackend::Postgres(db_cfg) => {
            let mut db = backends::sql::PostgresBackend::connect(db_cfg).await?;
            db.ensure_tables(presets, fields).await?;
            let _ = storage::POSTGRES.set(db);
            StorageBackend::Postgres
        },
        DatabaseBackend::MySQL(db_cfg) => {
            let mut db = backends::sql::MySQLBackend::connect(db_cfg).await?;
            db.ensure_tables(presets, fields).await?;
            let _ = storage::MYSQL.set(db);
            StorageBackend::MySQL
        },
        DatabaseBackend::Sqlite(db_cfg) => {
            let mut db = backends::sql::SqliteBackend::connect(db_cfg).await?;
            db.ensure_tables(presets, fields).await?;
            let _ = storage::SQLITE.set(db);
            StorageBackend::Sqlite
        },
    };

    let addr: SocketAddr = format!("{}:{}", &cfg.host, cfg.port).parse()?;
    let state_cfg = StateConfig(Arc::new(cfg));
    let _ = gotham::init_server(addr, router(backend, state_cfg)?).await;

    Ok(())
}

#[tokio::main]
async fn main() -> Result<()> {
    let args: ServerConfig = ServerConfig::parse();
    let bind = format!("{}:{}", args.host, args.port);

    if std::env::var_os("RUST_LOG").is_none() {
        std::env::set_var(
            "RUST_LOG",
            format!("{},poem=info,scylla=info,hyper=info", args.log_level),
        );
    }
    tracing_subscriber::fmt::init();

    config::init(&args.config_file).await?;

    if let Some(config) = config::config().global_cache {
        cache::init_cache(config)?;
    }

    setup_buckets().await?;

    let serving_path = if let Some(p) = config::config().base_serving_path.clone() {
        if !p.starts_with('/') {
            return Err(anyhow!("Invalid config: Base serving path must start with '/'"))
        }

        p
    } else {
        "".to_string()
    };

    let api_service = OpenApiService::new(
        routes::LustApi,
        "Lust API",
        env!("CARGO_PKG_VERSION"),
    )
    .description(include_str!("../description.md"))
    .server(args.docs_url.unwrap_or_else(|| format!("http://{}/v1{}", &bind, &serving_path)));

    let ui = api_service.redoc();
    let spec = api_service.spec();

    let app = Route::new()
        .nest(format!("/v1{}", serving_path), api_service)
        .nest("/ui", ui)
        .at("/spec", poem::endpoint::make_sync(move |_| spec.clone()))
        .around(log);

    info!("Lust has started!");
    info!(
        "serving requests @ http://{}",
        &bind,
    );
    info!(
        "Image handling @ http://{}/{}",
        &bind,
        format!("v1{}", serving_path),
    );
    info!("GitHub: https://github.com/chillfish8/lust");
    info!("To ask questions visit: https://github.com/chillfish8/lust/discussions");
    info!(
        "To get started you can check out the documentation @ http://{}/ui",
        &bind,
    );

    Server::new(TcpListener::bind(&bind))
        .run_with_graceful_shutdown(
            app,
            async move {
                let _ = wait_for_signal().await;
            },
            Some(Duration::from_secs(2)),
        )
        .await?;

    Ok(())
}
|
||||
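// Once running, the Redoc UI is served under /ui and the raw OpenAPI spec
// under /spec, exactly as wired into the Route above.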
|
||||
fn parse_args() -> ArgMatches<'static> {
|
||||
App::new("Lust")
|
||||
.version("0.1.0")
|
||||
.author("Harrison Burt <hburt2003@gmail.com>")
|
||||
.about("A powerful automatic image server.")
|
||||
.subcommand(
|
||||
SubCommand::with_name("init")
|
||||
.about("Initialises the workspace with a configuration file")
|
||||
.version("0.1.0")
|
||||
.arg(
|
||||
Arg::with_name("backend")
|
||||
.short("b")
|
||||
.long("backend")
|
||||
.help("The target database backend")
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("run")
|
||||
.about("Runs the server with the given configuration")
|
||||
.version("0.1.0")
|
||||
.arg(
|
||||
Arg::with_name("config")
|
||||
.short("c")
|
||||
.long("config")
|
||||
.help("The path to a given config file in JSON format.")
|
||||
.takes_value(true)
|
||||
.default_value("config.json"),
|
||||
),
|
||||
)
|
||||
.get_matches()
}
|
||||
async fn setup_buckets() -> anyhow::Result<()> {
|
||||
let global_limiter = config::config()
|
||||
.max_concurrency
|
||||
.map(Semaphore::new)
|
||||
.map(Arc::new);
|
||||
|
||||
let storage: Arc<dyn StorageBackend> = config::config()
|
||||
.backend
|
||||
.connect()
|
||||
.await?;
|
||||
|
||||
let buckets = config::config()
|
||||
.buckets
|
||||
.iter()
|
||||
.map(|(bucket, cfg)| {
|
||||
let bucket_id = crate::utils::crc_hash(bucket);
|
||||
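// Buckets are addressed by the CRC32 hash of their name at runtime; a
// hypothetical lookup elsewhere would be
// get_bucket_by_id(crate::utils::crc_hash("products")), where "products"
// is an illustrative bucket name.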
let pipeline = cfg.mode.build_pipeline(cfg);
|
||||
let cache = cfg.cache
|
||||
.map(cache::new_cache)
|
||||
.transpose()?
|
||||
.flatten();
|
||||
|
||||
let controller = BucketController::new(
|
||||
bucket_id,
|
||||
cache,
|
||||
global_limiter.clone(),
|
||||
cfg.clone(),
|
||||
pipeline,
|
||||
storage.clone(),
|
||||
);
|
||||
Ok::<_, anyhow::Error>((bucket_id, controller))
|
||||
})
|
||||
.collect::<Result<hashbrown::HashMap<_, _>, anyhow::Error>>()?;
|
||||
|
||||
controller::init_buckets(buckets);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn wait_for_signal() -> Result<()> {
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
tokio::signal::ctrl_c().await?;
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use tokio::signal::unix::{signal, SignalKind};
|
||||
|
||||
let mut stream_quit = signal(SignalKind::quit())?;
|
||||
let mut stream_interrupt = signal(SignalKind::interrupt())?;
|
||||
let mut stream_term = signal(SignalKind::terminate())?;
|
||||
|
||||
tokio::select! {
|
||||
_ = stream_quit.recv() => {},
|
||||
_ = stream_interrupt.recv() => {},
|
||||
_ = stream_term.recv() => {},
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
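// This future resolves on SIGQUIT/SIGINT/SIGTERM (or Ctrl-C on non-unix
// targets); run_with_graceful_shutdown then gives in-flight requests up to
// two seconds to complete before the server exits.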
|
||||
|
||||
async fn log<E: Endpoint>(next: E, req: Request) -> poem::Result<Response> {
|
||||
let method = req.method().clone();
|
||||
let path = req.uri().clone();
|
||||
|
||||
let start = Instant::now();
|
||||
let res = next.call(req).await;
|
||||
let elapsed = start.elapsed();
|
||||
|
||||
match res {
|
||||
Ok(r) => {
|
||||
let resp = r.into_response();
|
||||
|
||||
info!(
|
||||
"{} -> {} {} [ {:?} ] - {:?}",
|
||||
method.as_str(),
|
||||
resp.status().as_u16(),
|
||||
resp.status().canonical_reason().unwrap_or(""),
|
||||
elapsed,
|
||||
path.path(),
|
||||
);
|
||||
|
||||
Ok(resp)
|
||||
},
|
||||
Err(e) => {
|
||||
let msg = format!("{}", &e);
|
||||
let resp = e.into_response();
|
||||
|
||||
if resp.status().as_u16() >= 500 {
|
||||
error!("{}", msg);
|
||||
}
|
||||
|
||||
info!(
|
||||
"{} -> {} {} [ {:?} ] - {:?}",
|
||||
method.as_str(),
|
||||
resp.status().as_u16(),
|
||||
resp.status().canonical_reason().unwrap_or(""),
|
||||
elapsed,
|
||||
path.path(),
|
||||
);
|
||||
|
||||
Ok(resp)
|
||||
},
|
||||
}
|
||||
}
|
|
@ -0,0 +1,70 @@
|
|||
use bytes::Bytes;
|
||||
use hashbrown::HashMap;
|
||||
|
||||
use crate::config::{BucketConfig, ImageFormats, ImageKind, ResizingConfig};
|
||||
use crate::pipelines::{Pipeline, PipelineResult, StoreEntry};
|
||||
use crate::processor;
|
||||
|
||||
pub struct AheadOfTimePipeline {
|
||||
presets: HashMap<u32, ResizingConfig>,
|
||||
formats: ImageFormats,
|
||||
}
|
||||
|
||||
impl AheadOfTimePipeline {
|
||||
pub fn new(cfg: &BucketConfig) -> Self {
|
||||
Self {
|
||||
presets: cfg.presets
|
||||
.iter()
|
||||
.map(|(key, cfg)| (crate::utils::crc_hash(key), cfg.clone()))
|
||||
.collect(),
|
||||
formats: cfg.formats,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Pipeline for AheadOfTimePipeline {
|
||||
fn on_upload(&self, kind: ImageKind, data: Vec<u8>) -> anyhow::Result<PipelineResult> {
|
||||
let resized = processor::resizer::resize_image_to_presets(&self.presets, kind, data.into())?;
|
||||
|
||||
let mut to_store = vec![];
|
||||
for to_encode in resized {
|
||||
let encoded_images = processor::encoder::encode_following_config(
|
||||
self.formats,
|
||||
to_encode.img,
|
||||
to_encode.sizing_id
|
||||
)?;
|
||||
|
||||
to_store.extend(
|
||||
encoded_images
|
||||
.into_iter()
|
||||
.map(|v| StoreEntry {
|
||||
kind: v.kind,
|
||||
sizing_id: v.sizing_id,
|
||||
data: v.buff,
|
||||
}));
|
||||
}
|
||||
|
||||
Ok(PipelineResult {
|
||||
response: None,
|
||||
to_store,
|
||||
})
|
||||
}
|
||||
|
||||
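// In AOT mode every variant was already generated at upload time, so a
// fetch simply hands the stored bytes back without any re-processing.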
fn on_fetch(
|
||||
&self,
|
||||
_desired_kind: ImageKind,
|
||||
data_kind: ImageKind,
|
||||
data: Bytes,
|
||||
sizing_id: u32,
|
||||
_custom_size: Option<(u32, u32)>,
|
||||
) -> anyhow::Result<PipelineResult> {
|
||||
Ok(PipelineResult {
|
||||
response: Some(StoreEntry {
|
||||
data,
|
||||
sizing_id,
|
||||
kind: data_kind,
|
||||
}),
|
||||
to_store: vec![],
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,94 @@
|
|||
use bytes::Bytes;
|
||||
use hashbrown::HashMap;
|
||||
use image::load_from_memory_with_format;
|
||||
use crate::config::{BucketConfig, ImageFormats, ImageKind, ResizingConfig};
|
||||
use crate::pipelines::{Pipeline, PipelineResult, StoreEntry};
|
||||
use crate::processor;
|
||||
|
||||
pub struct JustInTimePipeline {
|
||||
presets: HashMap<u32, ResizingConfig>,
|
||||
formats: ImageFormats,
|
||||
}
|
||||
|
||||
impl JustInTimePipeline {
|
||||
pub fn new(cfg: &BucketConfig) -> Self {
|
||||
Self {
|
||||
presets: cfg.presets
|
||||
.iter()
|
||||
.map(|(key, cfg)| (crate::utils::crc_hash(key), cfg.clone()))
|
||||
.collect(),
|
||||
formats: cfg.formats,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Pipeline for JustInTimePipeline {
|
||||
fn on_upload(&self, kind: ImageKind, data: Vec<u8>) -> anyhow::Result<PipelineResult> {
|
||||
let webp_config = webp::config(
|
||||
self.formats.webp_config.quality.is_none(),
|
||||
self.formats.webp_config.quality.unwrap_or(50f32),
|
||||
self.formats.webp_config.method.unwrap_or(4) as i32,
|
||||
self.formats.webp_config.threading,
|
||||
);
|
||||
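// A missing quality value flips the first argument, the lossless flag
// (mirroring the parameters of webp::init_global elsewhere); otherwise the
// image is encoded lossily at the configured quality, with method 4 as the
// fallback speed/size trade-off.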
|
||||
let img = load_from_memory_with_format(&data, kind.into())?;
|
||||
let img = processor::encoder::encode_once(
|
||||
webp_config,
|
||||
self.formats.original_image_store_format,
|
||||
img,
|
||||
0,
|
||||
)?;
|
||||
|
||||
Ok(PipelineResult {
|
||||
response: None,
|
||||
to_store: vec![StoreEntry { kind: img.kind, data: img.buff, sizing_id: img.sizing_id }],
|
||||
})
|
||||
}
|
||||
|
||||
fn on_fetch(
|
||||
&self,
|
||||
desired_kind: ImageKind,
|
||||
data_kind: ImageKind,
|
||||
data: Bytes,
|
||||
sizing_id: u32,
|
||||
_custom_size: Option<(u32, u32)>,
|
||||
) -> anyhow::Result<PipelineResult> {
|
||||
let webp_config = webp::config(
|
||||
self.formats.webp_config.quality.is_none(),
|
||||
self.formats.webp_config.quality.unwrap_or(50f32),
|
||||
self.formats.webp_config.method.unwrap_or(4) as i32,
|
||||
self.formats.webp_config.threading,
|
||||
);
|
||||
|
||||
let img = load_from_memory_with_format(&data, data_kind.into())?;
|
||||
let (img, sizing_id) = if sizing_id != 0 {
|
||||
if let Some(cfg) = self.presets.get(&sizing_id) {
|
||||
(processor::resizer::resize(*cfg, &img), sizing_id)
|
||||
} else {
|
||||
(img, 0)
|
||||
}
|
||||
} else {
|
||||
(img, 0)
|
||||
};
|
||||
|
||||
let encoded = processor::encoder::encode_once(
|
||||
webp_config,
|
||||
desired_kind,
|
||||
img,
|
||||
sizing_id,
|
||||
)?;
|
||||
|
||||
Ok(PipelineResult {
|
||||
response: Some(StoreEntry {
|
||||
kind: encoded.kind,
|
||||
data: encoded.buff.clone(),
|
||||
sizing_id: encoded.sizing_id,
|
||||
}),
|
||||
to_store: vec![StoreEntry {
|
||||
kind: encoded.kind,
|
||||
data: encoded.buff.clone(),
|
||||
sizing_id: encoded.sizing_id,
|
||||
}]
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,107 @@
|
|||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use bytes::Bytes;
|
||||
use serde::Deserialize;
|
||||
use crate::config::{BucketConfig, ImageKind};
|
||||
|
||||
pub mod realtime;
|
||||
pub mod aot;
|
||||
pub mod jit;
|
||||
mod register;
|
||||
|
||||
pub use register::{Pipeline, PipelineSelector};
|
||||
|
||||
#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ProcessingMode {
|
||||
/// Images will be optimised and resized when they're
/// first requested, and the results are then stored.
|
||||
Jit,
|
||||
|
||||
/// Images have all optimisations and resizing applied to them
/// and are stored at upload time.
|
||||
Aot,
|
||||
|
||||
/// Only the original image will be stored; any optimisations will always
/// be run at request time and not stored.
|
||||
Realtime,
|
||||
}
|
||||
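// A minimal sketch of how a bucket might select its mode in the YAML config
// (the surrounding key names are assumptions; the lowercase variant names
// come from the serde attribute above):
//
//   buckets:
//     products:
//       mode: aot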
|
||||
impl Default for ProcessingMode {
|
||||
fn default() -> Self {
|
||||
Self::Jit
|
||||
}
|
||||
}
|
||||
|
||||
impl ProcessingMode {
|
||||
pub fn build_pipeline(&self, cfg: &BucketConfig) -> PipelineController {
|
||||
// Macro magic, ignore any type errors by the linter here.
|
||||
let selector = match self {
|
||||
Self::Jit => PipelineSelector::from(jit::JustInTimePipeline::new(cfg)),
|
||||
Self::Aot => PipelineSelector::from(aot::AheadOfTimePipeline::new(cfg)),
|
||||
Self::Realtime => PipelineSelector::from(realtime::RealtimePipeline::new(cfg)),
|
||||
};
|
||||
|
||||
PipelineController {
|
||||
inner: selector.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ExecutionResult {
|
||||
/// The result of a given pipeline after a given operation.
|
||||
pub result: PipelineResult,
|
||||
|
||||
/// The time taken to execute the pipeline.
|
||||
pub execution_time: Duration,
|
||||
}
|
||||
|
||||
pub struct PipelineResult {
|
||||
/// To be returned to the client in some form.
|
||||
pub response: Option<StoreEntry>,
|
||||
|
||||
/// To be persisted to the given storage backend.
|
||||
pub to_store: Vec<StoreEntry>,
|
||||
}
|
||||
|
||||
/// A single image variant produced by a pipeline.
pub struct StoreEntry {
    /// The raw binary data of the image.
    pub data: Bytes,
|
||||
pub kind: ImageKind,
|
||||
pub sizing_id: u32,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct PipelineController {
|
||||
inner: Arc<register::PipelineSelector>,
|
||||
}
|
||||
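// Cloning the controller is cheap: every clone shares the same selector
// through the Arc, which lets a bucket hand its pipeline to concurrent
// request handlers.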
|
||||
impl PipelineController {
|
||||
pub fn on_upload(
|
||||
&self,
|
||||
kind: ImageKind,
|
||||
data: Vec<u8>,
|
||||
) -> anyhow::Result<ExecutionResult> {
|
||||
let instant = Instant::now();
|
||||
let result = self.inner.on_upload(kind, data)?;
|
||||
let execution_time = instant.elapsed();
|
||||
|
||||
Ok(ExecutionResult { result, execution_time })
|
||||
}
|
||||
|
||||
pub fn on_fetch(
|
||||
&self,
|
||||
desired_kind: ImageKind,
|
||||
data_kind: ImageKind,
|
||||
data: Bytes,
|
||||
sizing_id: u32,
|
||||
custom_size: Option<(u32, u32)>,
|
||||
) -> anyhow::Result<ExecutionResult> {
|
||||
let instant = Instant::now();
|
||||
let result = self.inner.on_fetch(desired_kind, data_kind, data, sizing_id, custom_size)?;
|
||||
let execution_time = instant.elapsed();
|
||||
|
||||
Ok(ExecutionResult { result, execution_time })
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
use bytes::Bytes;
|
||||
use hashbrown::HashMap;
|
||||
use image::load_from_memory_with_format;
|
||||
use crate::config::{BucketConfig, ImageFormats, ImageKind, ResizingConfig};
|
||||
use crate::pipelines::{Pipeline, PipelineResult, StoreEntry};
|
||||
use crate::processor;
|
||||
|
||||
pub struct RealtimePipeline {
|
||||
presets: HashMap<u32, ResizingConfig>,
|
||||
formats: ImageFormats,
|
||||
}
|
||||
|
||||
impl RealtimePipeline {
|
||||
pub fn new(cfg: &BucketConfig) -> Self {
|
||||
Self {
|
||||
presets: cfg.presets
|
||||
.iter()
|
||||
.map(|(key, cfg)| (crate::utils::crc_hash(key), *cfg))
|
||||
.collect(),
|
||||
formats: cfg.formats,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Pipeline for RealtimePipeline {
|
||||
fn on_upload(&self, kind: ImageKind, data: Vec<u8>) -> anyhow::Result<PipelineResult> {
|
||||
let webp_config = webp::config(
|
||||
self.formats.webp_config.quality.is_none(),
|
||||
self.formats.webp_config.quality.unwrap_or(50f32),
|
||||
self.formats.webp_config.method.unwrap_or(4) as i32,
|
||||
self.formats.webp_config.threading,
|
||||
);
|
||||
|
||||
let img = load_from_memory_with_format(&data, kind.into())?;
|
||||
let img = processor::encoder::encode_once(webp_config, self.formats.original_image_store_format, img, 0)?;
|
||||
|
||||
Ok(PipelineResult {
|
||||
response: None,
|
||||
to_store: vec![StoreEntry { kind: img.kind, data: img.buff, sizing_id: 0 }],
|
||||
})
|
||||
}
|
||||
|
||||
fn on_fetch(
|
||||
&self,
|
||||
desired_kind: ImageKind,
|
||||
data_kind: ImageKind,
|
||||
data: Bytes,
|
||||
sizing_id: u32,
|
||||
custom_size: Option<(u32, u32)>,
|
||||
) -> anyhow::Result<PipelineResult> {
|
||||
let webp_config = webp::config(
|
||||
self.formats.webp_config.quality.is_none(),
|
||||
self.formats.webp_config.quality.unwrap_or(50f32),
|
||||
self.formats.webp_config.method.unwrap_or(4) as i32,
|
||||
self.formats.webp_config.threading,
|
||||
);
|
||||
|
||||
let img = load_from_memory_with_format(&data, data_kind.into())?;
|
||||
let (img, sizing_id) = if sizing_id != 0 {
|
||||
let maybe_resize = match self.presets.get(&sizing_id) {
|
||||
None => if let Some((width, height)) = custom_size {
|
||||
Some((
|
||||
ResizingConfig {
|
||||
width,
|
||||
height,
|
||||
filter: Default::default()
|
||||
},
|
||||
crate::utils::crc_hash((width, height)),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
},
|
||||
other => other.map(|v| (*v, sizing_id)),
|
||||
};
|
||||
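// Unknown sizing ids fall back to the custom width/height pair when one was
// supplied; hashing the pair gives it a synthetic sizing id so it stays
// uniform with the preset ids downstream.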
|
||||
if let Some((cfg, sizing_id)) = maybe_resize {
|
||||
(processor::resizer::resize(cfg, &img), sizing_id)
|
||||
} else {
|
||||
(img, 0)
|
||||
}
|
||||
} else {
|
||||
(img, 0)
|
||||
};
|
||||
|
||||
let encoded = processor::encoder::encode_once(
|
||||
webp_config,
|
||||
desired_kind,
|
||||
img,
|
||||
sizing_id,
|
||||
)?;
|
||||
|
||||
Ok(PipelineResult {
|
||||
response: Some(StoreEntry {
|
||||
kind: encoded.kind,
|
||||
data: encoded.buff,
|
||||
sizing_id: encoded.sizing_id,
|
||||
}),
|
||||
to_store: vec![]
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
use bytes::Bytes;
|
||||
use enum_dispatch::enum_dispatch;
|
||||
use crate::config::ImageKind;
|
||||
use crate::pipelines::PipelineResult;
|
||||
|
||||
use super::realtime::RealtimePipeline;
|
||||
use super::aot::AheadOfTimePipeline;
|
||||
use super::jit::JustInTimePipeline;
|
||||
|
||||
/// Pipelines are dynamically selected here.
///
/// This is not a Box<dyn Trait> because this code path is rather
/// performance critical, and enum dispatch gives the compiler more
/// room to optimise than a virtual call would.
|
||||
#[allow(clippy::enum_variant_names)]
|
||||
#[enum_dispatch(Pipeline)]
|
||||
pub enum PipelineSelector {
|
||||
RealtimePipeline,
|
||||
AheadOfTimePipeline,
|
||||
JustInTimePipeline,
|
||||
}
|
||||
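// Roughly speaking, enum_dispatch expands the trait impl into a match over
// the variants, something like this sketch:
//
//   impl Pipeline for PipelineSelector {
//       fn on_upload(&self, kind: ImageKind, data: Vec<u8>) -> anyhow::Result<PipelineResult> {
//           match self {
//               Self::RealtimePipeline(inner) => inner.on_upload(kind, data),
//               Self::AheadOfTimePipeline(inner) => inner.on_upload(kind, data),
//               Self::JustInTimePipeline(inner) => inner.on_upload(kind, data),
//           }
//       }
//       // ...and likewise for on_fetch.
//   }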
|
||||
#[enum_dispatch]
|
||||
pub trait Pipeline: Sync + Send + 'static {
|
||||
fn on_upload(&self, kind: ImageKind, data: Vec<u8>) -> anyhow::Result<PipelineResult>;
|
||||
|
||||
fn on_fetch(
|
||||
&self,
|
||||
desired_kind: ImageKind,
|
||||
data_kind: ImageKind,
|
||||
data: Bytes,
|
||||
sizing_id: u32,
|
||||
custom_size: Option<(u32, u32)>,
|
||||
) -> anyhow::Result<PipelineResult>;
|
||||
}
|
|
@ -0,0 +1,89 @@
|
|||
use std::io::Cursor;
|
||||
use std::sync::Arc;
|
||||
use bytes::Bytes;
|
||||
use image::{DynamicImage, ImageFormat};
|
||||
use crate::config::{ImageFormats, ImageKind};
|
||||
|
||||
|
||||
pub struct EncodedImage {
|
||||
pub kind: ImageKind,
|
||||
pub buff: Bytes,
|
||||
pub sizing_id: u32,
|
||||
}
|
||||
|
||||
pub fn encode_following_config(
|
||||
cfg: ImageFormats,
|
||||
img: DynamicImage,
|
||||
sizing_id: u32,
|
||||
) -> anyhow::Result<Vec<EncodedImage>> {
|
||||
let original_image = Arc::new(img);
|
||||
|
||||
let webp_config = webp::config(
|
||||
cfg.webp_config.quality.is_none(),
|
||||
cfg.webp_config.quality.unwrap_or(50f32),
|
||||
cfg.webp_config.method.unwrap_or(4) as i32,
|
||||
cfg.webp_config.threading,
|
||||
);
|
||||
|
||||
let (tx, rx) = crossbeam::channel::bounded(4);
|
||||
|
||||
for variant in ImageKind::variants() {
|
||||
if cfg.is_enabled(*variant) {
|
||||
let tx_local = tx.clone();
|
||||
let local = original_image.clone();
|
||||
rayon::spawn(move || {
|
||||
let result = encode_to(webp_config, &local, (*variant).into());
|
||||
tx_local
|
||||
.send(result.map(|v| EncodedImage { kind: *variant, buff: v, sizing_id }))
|
||||
.expect("Failed to respond to encoding request. Sender already closed.");
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Needed to prevent deadlock.
|
||||
drop(tx);
|
||||
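// rx.recv() only returns Err once every sender is gone, so dropping the
// original tx here lets the collection loop below terminate after the last
// rayon task reports in.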
|
||||
let mut processed = vec![];
|
||||
while let Ok(encoded) = rx.recv() {
|
||||
processed.push(encoded);
|
||||
}
|
||||
|
||||
let finished = processed
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<EncodedImage>, _>>()?;
|
||||
|
||||
Ok(finished)
|
||||
}
|
||||
|
||||
|
||||
pub fn encode_once(
|
||||
webp_cfg: webp::WebPConfig,
|
||||
to: ImageKind,
|
||||
img: DynamicImage,
|
||||
sizing_id: u32,
|
||||
) -> anyhow::Result<EncodedImage> {
|
||||
let (tx, rx) = crossbeam::channel::bounded(4);
|
||||
|
||||
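// The CPU-heavy encode is pushed onto the rayon pool; recv() then blocks
// this thread until the single result arrives.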
rayon::spawn(move || {
|
||||
let result = encode_to(webp_cfg, &img, to.into());
|
||||
tx.send(result.map(|v| EncodedImage { kind: to, buff: v, sizing_id }))
|
||||
.expect("Failed to respond to encoding request. Sender already closed.");
|
||||
});
|
||||
|
||||
rx.recv()?
|
||||
}
|
||||
|
||||
|
||||
#[inline]
|
||||
pub fn encode_to(webp_cfg: webp::WebPConfig, img: &DynamicImage, format: ImageFormat) -> anyhow::Result<Bytes> {
|
||||
if let ImageFormat::WebP = format {
|
||||
let webp_image = webp::Encoder::from_image(webp_cfg, img);
|
||||
let encoded = webp_image.encode();
|
||||
|
||||
return Ok(Bytes::from(encoded?.to_vec()))
|
||||
}
|
||||
|
||||
let mut buff = Cursor::new(Vec::new());
|
||||
img.write_to(&mut buff, format)?;
|
||||
Ok(Bytes::from(buff.into_inner()))
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
pub mod encoder;
|
||||
pub mod resizer;
|
|
@ -0,0 +1,49 @@
|
|||
use std::sync::Arc;
|
||||
use bytes::Bytes;
|
||||
use hashbrown::HashMap;
|
||||
use image::{DynamicImage, load_from_memory_with_format};
|
||||
use crate::config::{ImageKind, ResizingConfig};
|
||||
|
||||
pub struct ResizedImage {
|
||||
pub sizing_id: u32,
|
||||
pub img: DynamicImage,
|
||||
}
|
||||
|
||||
pub fn resize_image_to_presets(
|
||||
presets: &HashMap<u32, ResizingConfig>,
|
||||
kind: ImageKind,
|
||||
data: Bytes,
|
||||
) -> anyhow::Result<Vec<ResizedImage>> {
|
||||
let original_image = Arc::new(load_from_memory_with_format(data.as_ref(), kind.into())?);
|
||||
|
||||
let (tx, rx) = crossbeam::channel::bounded(presets.len());
|
||||
for (sizing_id, cfg) in presets {
|
||||
let sizing_id = *sizing_id;
|
||||
let cfg = *cfg;
|
||||
let local_tx = tx.clone();
|
||||
let local = original_image.clone();
|
||||
rayon::spawn(move || {
|
||||
let img = resize(cfg, &local);
|
||||
local_tx
|
||||
.send(ResizedImage { sizing_id, img })
|
||||
.expect("Failed to respond to encoding request. Sender already closed.");
|
||||
});
|
||||
}
|
||||
|
||||
// Needed to prevent deadlock.
|
||||
drop(tx);
|
||||
|
||||
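// Sizing id 0 is reserved for the original, un-resized image, so it is
// seeded into the results before the preset variants are collected.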
let mut finished = vec![ResizedImage {
|
||||
sizing_id: 0,
|
||||
img: original_image.as_ref().clone(),
|
||||
}];
|
||||
while let Ok(encoded) = rx.recv() {
|
||||
finished.push(encoded);
|
||||
}
|
||||
|
||||
Ok(finished)
|
||||
}
|
||||
|
||||
pub fn resize(cfg: ResizingConfig, img: &DynamicImage) -> DynamicImage {
|
||||
img.resize(cfg.width, cfg.height, cfg.filter.into())
|
||||
}
|
|
@ -1,54 +0,0 @@
|
|||
use bytes::BytesMut;
|
||||
use gotham::hyper::http::{header, Response, StatusCode};
|
||||
use gotham::hyper::Body;
|
||||
use headers::{ContentType, HeaderMapExt};
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::image::ImageFormat;
|
||||
|
||||
/// A standard JSON response with the content type set to application/json
|
||||
pub fn json_response(status: StatusCode, data: Option<Value>) -> Response<Body> {
|
||||
let payload = json!({
|
||||
"status": status.as_u16(),
|
||||
"data": data,
|
||||
});
|
||||
|
||||
let mut resp = Response::builder()
|
||||
.status(status)
|
||||
.body(Body::from(serde_json::to_vec(&payload).unwrap()))
|
||||
.unwrap();
|
||||
|
||||
resp.headers_mut().typed_insert(ContentType::json());
|
||||
|
||||
resp
|
||||
}
|
||||
|
||||
pub fn image_response(format: ImageFormat, data: BytesMut) -> Response<Body> {
|
||||
let mut resp = Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.body(Body::from(data.to_vec()))
|
||||
.unwrap();
|
||||
|
||||
let content_type = match format {
|
||||
ImageFormat::Png => "image/png",
|
||||
ImageFormat::Jpeg => "image/jpeg",
|
||||
ImageFormat::Gif => "image/gif",
|
||||
ImageFormat::WebP => "image/webp",
|
||||
};
|
||||
|
||||
resp.headers_mut()
|
||||
.insert(header::CONTENT_TYPE, content_type.parse().unwrap());
|
||||
|
||||
resp
|
||||
}
|
||||
|
||||
pub fn empty_response(status: StatusCode) -> Response<Body> {
|
||||
let mut resp = Response::builder()
|
||||
.status(status)
|
||||
.body(Body::from(Vec::new()))
|
||||
.unwrap();
|
||||
|
||||
resp.headers_mut().typed_insert(ContentType::text_utf8());
|
||||
|
||||
resp
|
||||
}
|
src/routes.rs
|
@ -1,290 +1,305 @@
|
|||
use base64::{decode, encode};
use gotham::handler::HandlerResult;
use gotham::hyper::http::StatusCode;
use gotham::hyper::{body, Body};
use gotham::state::{FromState, State};
use log::{debug, error};
use uuid::Uuid;

use crate::cache::CACHE_STATE;
use crate::configure::StateConfig;
use crate::context::{FilesListPayload, FilterType, OrderBy};
use crate::image::{
    delete_image,
    get_image,
    process_new_image,
    ImageGet,
    ImageRemove,
    ImageUpload,
    ImageUploaded,
};
use crate::response::{empty_response, image_response, json_response};
use crate::storage::StorageBackend;
use crate::traits::ImageStore;
use crate::PathExtractor;

macro_rules! from_body {
    ( $e:expr ) => {{
        let res = body::to_bytes(Body::take_from(&mut $e)).await;
        let bod = match res {
            Ok(bod) => bod,
            Err(e) => {
                error!("failed to read data from body {:?}", &e);
                return Ok((
                    $e,
                    json_response(
                        StatusCode::INTERNAL_SERVER_ERROR,
                        Some(json!({
                            "message": format!("encountered exception: {:?}", e)
                        })),
                    ),
                ));
            }
        };

        match serde_json::from_slice(bod.as_ref()) {
            Ok(v) => v,
            Err(e) => {
                return Ok((
                    $e,
                    json_response(
                        StatusCode::UNPROCESSABLE_ENTITY,
                        Some(json!({
                            "message":
                                format!(
                                    "failed to deserialize POST body due to the following error: {:?}",
                                    e
                                )
                        })),
                    ),
                ));
            }
        }
    }};
}

/// Gets a given image from the storage backend with the given
/// preset and format if it does not already exist in cache.
///
/// This endpoint can return any of the following status codes:
///
/// 404:
///     The image does not exist, NOTE: This endpoint will **always**
///     return a 404 if an unexpected error was encountered rather than
///     raising an error to the requester, instead it will be logged in
///     the console.
///
/// 200:
///     The image was successfully fetched and sent as the response.
///
/// TODO:
///     Likely performance issues could become apparent at higher
///     concurrency due to the Mutex on the LRU cache, although this
///     is probably insignificant compared to the time spent on IO.
pub async fn get_file(mut state: State) -> HandlerResult {
    let path_vars = PathExtractor::take_from(&mut state);
    let params = ImageGet::take_from(&mut state);
    let config = StateConfig::take_from(&mut state);

    let file_id = path_vars.file_id;
    let category = path_vars.category.unwrap_or_else(|| "default".to_string());

    let format = params
        .format
        .unwrap_or_else(|| config.0.default_serving_format.clone());

    let mut preset = params
        .preset
        .unwrap_or_else(|| config.0.default_serving_preset.clone());

    if preset != "original" {
        // We don't want to necessarily error if you give an invalid
        // preset, but we don't want to attempt something that doesn't
        // exist.
        if !config.0.size_presets.contains_key(&preset) {
            preset = "original".into();
        }
    }

    let cache = CACHE_STATE.get().expect("not initialised");
    let img = if let Some(cached) = cache.get(file_id, preset.clone(), format) {
        debug!(
            "using cached version of image for file_id: {}, preset: {}, format: {:?}",
            file_id, &preset, format,
        );
        Some(cached)
    } else {
        debug!(
            "using backend version of image for file_id: {}, preset: {}, format: {:?}",
            file_id, &preset, format,
        );
        if let Some(data) = get_image(&mut state, file_id, preset.clone(), &category, format).await
        {
            cache.set(file_id, preset, format, data.clone());
            Some(data)
        } else {
            None
        }
    };

    match img {
        None => Ok((state, empty_response(StatusCode::NOT_FOUND))),
        Some(data) => {
            if params.encode.unwrap_or(false) {
                let encoded = encode(data.as_ref());
                return Ok((
                    state,
                    json_response(
                        StatusCode::OK,
                        Some(json!({
                            "image": encoded,
                        })),
                    ),
                ));
            }
            Ok((state, image_response(format, data)))
        },
    }
}

/// Handles a POST request for adding an image to the store.
///
/// The image payload must be in JSON format and be base64 encoded in
/// the standard specification.
///
/// E.g.
/// ```json
/// {
///     "format": "png",
///     "data": "...data ensues..."
/// }
/// ```
pub async fn add_file(mut state: State) -> HandlerResult {
    let upload: ImageUpload = from_body!(state);

    let format = upload.format;
    let data = match decode(upload.data) {
        Ok(d) => d,
        Err(_) => {
            return Ok((
                state,
                json_response(
                    StatusCode::UNPROCESSABLE_ENTITY,
                    Some(json!({
                        "message": "data is not encoded in base64 format correctly",
                    })),
                ),
            ));
        },
    };

    let category = upload.category.unwrap_or_else(|| "default".to_string());

    let (file_id, formats) = match process_new_image(&mut state, &category, format, data).await {
        Ok(v) => v,
        Err(e) => {
            return Ok((
                state,
                json_response(
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Some(json!({
                        "message": format!("failed to process image: {:?}", e),
                    })),
                ),
            ));
        },
    };

    let resp = ImageUploaded {
        file_id,
        formats,
        category,
    };

    let resp = serde_json::to_value(resp).expect("failed to serialize uploaded stats");

    Ok((state, json_response(StatusCode::OK, Some(resp))))
}

/// Handles removing an image from the store.
///
/// This removes the image from both the database backend and
/// the cache if it exists in there.
///
/// This only requires the UUID of the image; no other information
/// is needed.
///
/// Note on semantics:
///     This endpoint does not check if the image exists or not,
///     it simply tries to remove it if it exists otherwise ignores it.
///
///     For that reason this will always return 200 if no exceptions
///     happened at the time.
///
/// This endpoint can return any of the following responses:
///
/// 500:
///     The server could not complete the request due to an unexpected
///     exception, this is typically only possible via the transaction
///     on the database backend failing.
///
/// 200:
///     The image has been removed successfully.
pub async fn remove_file(mut state: State) -> HandlerResult {
    let params = ImageRemove::take_from(&mut state);

    if let Err(e) = delete_image(&mut state, params.file_id).await {
        return Ok((
            state,
            json_response(
                StatusCode::INTERNAL_SERVER_ERROR,
                Some(json!({
                    "message": format!(
                        "failed to delete image with id: {} due to the following exception: {:?}",
                        params.file_id,
                        e
                    )
                })),
            ),
        ));
    };

    Ok((
        state,
        json_response(
            StatusCode::OK,
            Some(json!({
                "message": "file deleted if exists",
                "file_id": params.file_id.to_string()
            })),
        ),
    ))
}

pub async fn list_files(mut state: State) -> HandlerResult {
    let payload: FilesListPayload = from_body!(state);
    let storage = StorageBackend::take_from(&mut state);

    let filter = payload.filter.unwrap_or_else(|| FilterType::All);
    let sort = payload.order.unwrap_or_else(|| OrderBy::CreationDate);
    let page = payload.page.unwrap_or_else(|| 1usize);

    let (status, payload) = match storage.list_entities(filter.clone(), sort, page).await {
        Ok(results) => (
            StatusCode::OK,
            Some(json!({
                "page": page,
                "filtered_by": filter,
                "ordered_by": sort,
                "results": results,
            })),
        ),
        Err(e) => (
            StatusCode::INTERNAL_SERVER_ERROR,
            Some(json!({
                "message": format!("failed to fetch results for page due to error: {:?}", e)
            })),
        ),
    };

    Ok((state, json_response(status, payload)))
}

use std::fmt::Display;
use bytes::Bytes;
use poem_openapi::OpenApi;
use poem::{Body, Result};
use poem_openapi::{ApiResponse, Object};
use poem_openapi::param::{Header, Path, Query};
use poem_openapi::payload::{Binary, Json};
use futures::StreamExt;
use uuid::Uuid;

use crate::config::{config, ImageKind};
use crate::controller::{BucketController, get_bucket_by_name, UploadInfo};
use crate::pipelines::ProcessingMode;

#[derive(Debug, Object)]
pub struct Detail {
    /// Additional information regarding the response.
    detail: String,
}

#[derive(ApiResponse)]
pub enum UploadResponse {
    #[oai(status = 200)]
    Ok(Json<UploadInfo>),

    /// Bucket not found
    #[oai(status = 404)]
    NotFound,

    /// The image format was incorrect or the system was
    /// unable to guess the format of the image.
    #[oai(status = 400)]
    InvalidImageFormat,

    /// The upload exceeds the configured maximum file size.
    #[oai(status = 413)]
    TooBig,

    #[allow(unused)]
    /// You are not authorized to complete this action.
    ///
    /// This normally means the `Authorization` bearer has been left out
    /// of the request or is invalid.
    #[oai(status = 401)]
    Unauthorized,
}

#[derive(ApiResponse)]
pub enum DeleteResponse {
    #[oai(status = 200)]
    Ok,

    #[allow(unused)]
    /// You are not authorized to complete this action.
    ///
    /// This normally means the `Authorization` bearer has been left out
    /// of the request or is invalid.
    #[oai(status = 401)]
    Unauthorized,

    /// Bucket does not exist.
    #[oai(status = 404)]
    NotFound,
}

#[derive(ApiResponse)]
pub enum FetchResponse {
    #[oai(status = 200)]
    Ok(
        Binary<Vec<u8>>,
        #[oai(header = "content-type")] String,
    ),

    /// The request is invalid with the current configuration.
    ///
    /// See the detail section for more info.
    #[oai(status = 400)]
    UnsupportedOperation(Json<Detail>),

    /// Bucket does not exist or image does not exist.
    ///
    /// See the detail section for more info.
    #[oai(status = 404)]
    NotFound(Json<Detail>),
}

impl FetchResponse {
    fn bucket_not_found(bucket: &str) -> Self {
        let detail = Detail {
            detail: format!("The bucket {:?} does not exist.", bucket),
        };

        Self::NotFound(Json(detail))
    }

    fn image_not_found(image_id: Uuid) -> Self {
        let detail = Detail {
            detail: format!("The image {:?} does not exist in bucket.", image_id),
        };

        Self::NotFound(Json(detail))
    }

    fn bad_request(msg: impl Display) -> Self {
        let detail = Detail {
            detail: msg.to_string(),
        };

        Self::UnsupportedOperation(Json(detail))
    }
}

pub struct LustApi;

#[OpenApi(prefix_path = "/:bucket")]
impl LustApi {
    /// Upload Image
    ///
    /// Upload an image to the given bucket.
    /// The `content-type` header must be provided as well
    /// as the `content-length` header otherwise the request will be rejected.
    ///
    /// The uploaded file must also not exceed the given `content-length`.
    #[oai(path = "/", method = "post")]
    pub async fn upload_image(
        &self,
        /// The bucket that the image should be uploaded to.
        bucket: Path<String>,

        /// The total size of the image in bytes.
        #[oai(name = "content-length")] content_length: Header<usize>,

        /// The format that the uploaded image is encoded in.
        ///
        /// If not provided, lust will guess the encoding.
        format: Query<Option<ImageKind>>,

        /// The raw binary data of the image.
        file: Binary<Body>,
    ) -> Result<UploadResponse> {
        let bucket = match get_bucket_by_name(&*bucket) {
            None => return Ok(UploadResponse::NotFound),
            Some(b) => b,
        };

        let length = if !config().valid_global_size(*content_length) {
            return Ok(UploadResponse::TooBig)
        } else {
            let local_limit = bucket
                .cfg()
                .max_upload_size
                .map(|v| (v * 1024) as usize)
                .unwrap_or(u32::MAX as usize);

            if *content_length > local_limit {
                return Ok(UploadResponse::TooBig)
            }

            *content_length
        };

        let mut allocated_image = Vec::with_capacity(length);
        let mut stream = file.0.into_bytes_stream();
        while let Some(chunk) = stream.next().await {
            let chunk: Bytes = chunk.map_err(anyhow::Error::from)?;
            allocated_image.extend(chunk.into_iter());

            if allocated_image.len() > length {
                return Ok(UploadResponse::TooBig)
            }
        }

        let format = if let Some(format) = format.0 {
            let validate = image::load_from_memory_with_format(&allocated_image, format.into());
            if validate.is_err() {
                return Ok(UploadResponse::InvalidImageFormat)
            }

            format
        } else {
            let maybe_guessed = image::guess_format(&allocated_image)
                .map(ImageKind::from_guessed_format)
                .map_err(anyhow::Error::from)?;

            if let Some(guessed) = maybe_guessed {
                guessed
            } else {
                return Ok(UploadResponse::InvalidImageFormat)
            }
        };

        let info = bucket.upload(format, allocated_image).await?;
        Ok(UploadResponse::Ok(Json(info)))
    }

    /// Fetch Image
    ///
    /// Fetch the image from the storage backend and apply any additional effects
    /// if required.
    #[allow(clippy::too_many_arguments)]
    #[oai(path = "/:image_id", method = "get")]
    pub async fn fetch_image(
        &self,
        /// The bucket to try fetch the image from.
        bucket: Path<String>,

        /// The id of the image.
        image_id: Path<Uuid>,

        /// The encoding format that the image should be returned as.
        format: Query<Option<ImageKind>>,

        /// The size preset that should be used when returning the image.
        size: Query<Option<String>>,

        /// A custom width to resize the returned image to.
        width: Query<Option<u32>>,

        /// A custom height to resize the returned image to.
        height: Query<Option<u32>>,

        /// A set of `,` separated content-types that could be sent as a response.
        /// E.g. `image/png,image/webp,image/gif`
        accept: Header<Option<String>>,
    ) -> Result<FetchResponse> {
        let bucket = match get_bucket_by_name(&*bucket) {
            None => return Ok(FetchResponse::bucket_not_found(&*bucket)),
            Some(b) => b,
        };

        let kind = get_image_kind(format.0, accept.0, bucket);
        let custom_sizing = match (width.0, height.0) {
            (Some(w), Some(h)) => if bucket.cfg().mode != ProcessingMode::Realtime {
                return Ok(FetchResponse::bad_request(
                    "Custom resizing can only be done when bucket set to 'realtime' processing mode",
                ))
            } else {
                Some((w, h))
            },
            (None, None) => None,
            _ => return Ok(FetchResponse::bad_request(
                "A custom size must include both the width and the height.",
            ))
        };

        let img = bucket.fetch(image_id.0, kind, size.0, custom_sizing).await?;
        match img {
            None => Ok(FetchResponse::image_not_found(image_id.0)),
            Some(img) => Ok(FetchResponse::Ok(Binary(img.data.to_vec()), img.kind.as_content_type()))
        }
    }

    /// Delete Image
    ///
    /// Delete the given image.
    /// This will purge all variants of the image including sizing presets and formats.
    ///
    /// Images that do not exist already will be ignored and will not return a 404.
    #[oai(path = "/:image_id", method = "delete")]
    pub async fn delete_image(
        &self,
        /// The bucket to try delete the image from.
        bucket: Path<String>,

        /// The image to try to delete.
        image_id: Path<Uuid>,
    ) -> Result<DeleteResponse> {
        let bucket = match get_bucket_by_name(&*bucket) {
            None => return Ok(DeleteResponse::NotFound),
            Some(b) => b,
        };

        bucket.delete(*image_id).await?;

        Ok(DeleteResponse::Ok)
    }
}

fn get_image_kind(direct_format: Option<ImageKind>, accept: Option<String>, bucket: &BucketController) -> ImageKind {
    match direct_format {
        Some(kind) => kind,
        None => match accept {
            Some(accept) => {
                let parts = accept.split(',');
                for accepted in parts {
                    if let Some(kind) = ImageKind::from_content_type(accepted) {
                        return kind;
                    }
                }

                bucket.cfg()
                    .default_serving_format
                    .unwrap_or_else(|| bucket.cfg().formats.first_enabled_format())
            },
            None => bucket.cfg()
                .default_serving_format
                .unwrap_or_else(|| bucket.cfg().formats.first_enabled_format()),
        },
    }
}
|
||||
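// A hypothetical set of requests against the API surface above, assuming a
// bucket named "products" and the default /v1 prefix (names are illustrative):
//
//   POST   /v1/products?format=png                  upload the request body as a PNG
//   GET    /v1/products/<uuid>?size=thumb           fetch the "thumb" preset
//   GET    /v1/products/<uuid>?width=64&height=64   custom size (realtime buckets only)
//   DELETE /v1/products/<uuid>                      purge every stored variant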
|
|
src/storage.rs
|
@ -1,137 +0,0 @@
|
|||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use bytes::BytesMut;
|
||||
use gotham_derive::StateData;
|
||||
use log::error;
|
||||
use once_cell::sync::OnceCell;
|
||||
use serde::Deserialize;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::backends;
|
||||
use crate::context::{FilterType, IndexResult, OrderBy};
|
||||
use crate::image::{ImageFormat, ImagePresetsData};
|
||||
use crate::traits::ImageStore;
|
||||
|
||||
// The below definitions are a hack, this is due to
|
||||
pub(crate) static REDIS: OnceCell<backends::redis::Backend> = OnceCell::new();
|
||||
pub(crate) static CASSANDRA: OnceCell<backends::cql::Backend> = OnceCell::new();
|
||||
pub(crate) static POSTGRES: OnceCell<backends::sql::PostgresBackend> = OnceCell::new();
|
||||
pub(crate) static MYSQL: OnceCell<backends::sql::MySQLBackend> = OnceCell::new();
|
||||
pub(crate) static SQLITE: OnceCell<backends::sql::SqliteBackend> = OnceCell::new();
|
||||
|
||||
#[derive(Clone, Deserialize)]
|
||||
#[serde(rename_all = "lowercase", tag = "type", content = "config")]
|
||||
pub enum DatabaseBackend {
|
||||
Redis(backends::redis::RedisConfig),
|
||||
Cassandra(backends::cql::DatabaseConfig),
|
||||
Postgres(backends::sql::DatabaseConfig),
|
||||
MySQL(backends::sql::DatabaseConfig),
|
||||
Sqlite(backends::sql::DatabaseConfig),
|
||||
}
|
||||
|
||||
macro_rules! acquire {
|
||||
( $e:expr ) => {{
|
||||
$e.get().expect("backend not initialised")
|
||||
}};
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, StateData)]
|
||||
pub enum StorageBackend {
|
||||
Redis,
|
||||
Cassandra,
|
||||
Postgres,
|
||||
MySQL,
|
||||
Sqlite,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ImageStore for StorageBackend {
|
||||
async fn get_image(
|
||||
&self,
|
||||
file_id: Uuid,
|
||||
preset: String,
|
||||
category: &str,
|
||||
format: ImageFormat,
|
||||
) -> Option<BytesMut> {
|
||||
match self {
|
||||
Self::Redis => {
|
||||
acquire!(REDIS)
|
||||
.get_image(file_id, preset, category, format)
|
||||
.await
|
||||
},
|
||||
Self::Cassandra => {
|
||||
acquire!(CASSANDRA)
|
||||
.get_image(file_id, preset, category, format)
|
||||
.await
|
||||
},
|
||||
Self::Postgres => {
|
||||
acquire!(POSTGRES)
|
||||
.get_image(file_id, preset, category, format)
|
||||
.await
|
||||
},
|
||||
Self::MySQL => {
|
||||
acquire!(MYSQL)
|
||||
.get_image(file_id, preset, category, format)
|
||||
.await
|
||||
},
|
||||
Self::Sqlite => {
|
||||
acquire!(SQLITE)
|
||||
.get_image(file_id, preset, category, format)
|
||||
.await
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()> {
|
||||
let res = match self {
|
||||
Self::Redis => acquire!(REDIS).add_image(file_id, category, data).await,
|
||||
Self::Cassandra => acquire!(CASSANDRA).add_image(file_id, category, data).await,
|
||||
Self::Postgres => acquire!(POSTGRES).add_image(file_id, category, data).await,
|
||||
Self::MySQL => acquire!(MYSQL).add_image(file_id, category, data).await,
|
||||
Self::Sqlite => acquire!(SQLITE).add_image(file_id, category, data).await,
|
||||
};
|
||||
|
||||
if let Err(e) = &res {
|
||||
error!("failed to add image {:?}", e);
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()> {
|
||||
let res = match self {
|
||||
Self::Redis => acquire!(REDIS).remove_image(file_id, presets).await,
|
||||
Self::Cassandra => acquire!(CASSANDRA).remove_image(file_id, presets).await,
|
||||
Self::Postgres => acquire!(POSTGRES).remove_image(file_id, presets).await,
|
||||
Self::MySQL => acquire!(MYSQL).remove_image(file_id, presets).await,
|
||||
Self::Sqlite => acquire!(SQLITE).remove_image(file_id, presets).await,
|
||||
};
|
||||
|
||||
if let Err(e) = &res {
|
||||
error!("failed to remove image {:?}", e);
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
async fn list_entities(
|
||||
&self,
|
||||
filter: FilterType,
|
||||
order: OrderBy,
|
||||
page: usize,
|
||||
) -> Result<Vec<IndexResult>> {
|
||||
let res = match self {
|
||||
Self::Redis => acquire!(REDIS).list_entities(filter, order, page).await,
|
||||
Self::Cassandra => acquire!(CASSANDRA).list_entities(filter, order, page).await,
|
||||
Self::Postgres => acquire!(POSTGRES).list_entities(filter, order, page).await,
|
||||
Self::MySQL => acquire!(MYSQL).list_entities(filter, order, page).await,
|
||||
Self::Sqlite => acquire!(SQLITE).list_entities(filter, order, page).await,
|
||||
};
|
||||
|
||||
if let Err(e) = &res {
|
||||
error!("failed to list images {:?}", e);
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
}
|
|
@ -0,0 +1,159 @@
|
|||
use std::time::Duration;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use async_trait::async_trait;
|
||||
use bytes::Bytes;
|
||||
use rusoto_core::credential::{AutoRefreshingProvider, ChainProvider};
|
||||
use rusoto_core::{HttpClient, HttpConfig, Region};
|
||||
use rusoto_s3::{DeleteObjectRequest, GetObjectRequest, PutObjectRequest, S3Client, S3, StreamingBody};
|
||||
use tokio::io::AsyncReadExt;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::config::ImageKind;
|
||||
use crate::controller::get_bucket_by_id;
|
||||
use crate::StorageBackend;
|
||||
|
||||
/// The credential fetch timeout, in seconds.
|
||||
const CREDENTIAL_TIMEOUT: u64 = 5;
|
||||
|
||||
pub struct BlobStorageBackend {
|
||||
bucket_name: String,
|
||||
client: S3Client,
|
||||
store_public: bool,
|
||||
}
|
||||
|
||||
impl BlobStorageBackend {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
name: String,
|
||||
region: String,
|
||||
endpoint: String,
|
||||
store_public: bool,
|
||||
) -> Result<Self> {
|
||||
let mut chain_provider = ChainProvider::new();
|
||||
chain_provider.set_timeout(Duration::from_secs(CREDENTIAL_TIMEOUT));
|
||||
|
||||
let credentials_provider = AutoRefreshingProvider::new(chain_provider)
|
||||
.with_context(|| "Failed to fetch credentials for the object storage.")?;
|
||||
|
||||
let mut http_config: HttpConfig = HttpConfig::default();
|
||||
http_config.pool_idle_timeout(std::time::Duration::from_secs(10));
|
||||
|
||||
let http_client = HttpClient::new_with_config(http_config)
|
||||
.with_context(|| "Failed to create request dispatcher")?;
|
||||
|
||||
let region = Region::Custom { name: region, endpoint };
|
||||
|
||||
let client = S3Client::new_with(
|
||||
http_client,
|
||||
credentials_provider,
|
||||
region,
|
||||
);
|
||||
|
||||
Ok(Self {
|
||||
bucket_name: name,
|
||||
client,
|
||||
store_public,
|
||||
})
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn format_path(
|
||||
&self,
|
||||
bucket_id: u32,
|
||||
sizing_id: u32,
|
||||
image_id: Uuid,
|
||||
format: ImageKind,
|
||||
) -> String {
|
||||
format!("{}/{}/{}.{}", bucket_id, sizing_id, image_id, format.as_file_extension())
|
||||
}
|
||||
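// Objects are keyed as <bucket_id>/<sizing_id>/<image_id>.<ext>, so every
// variant of an image is addressable from its ids alone, with no lookup.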
}
|
||||
|
||||
#[async_trait]
|
||||
impl StorageBackend for BlobStorageBackend {
|
||||
async fn store(
|
||||
&self,
|
||||
bucket_id: u32,
|
||||
image_id: Uuid,
|
||||
kind: ImageKind,
|
||||
sizing_id: u32,
|
||||
data: Bytes,
|
||||
) -> anyhow::Result<()> {
|
||||
let store_in = self.format_path(bucket_id, sizing_id, image_id, kind);
|
||||
|
||||
debug!("Storing image in bucket @ {}", &store_in);
|
||||
|
||||
let request = PutObjectRequest {
|
||||
bucket: self.bucket_name.clone(),
|
||||
key: store_in,
|
||||
body: Some(StreamingBody::from(data.to_vec())),
|
||||
content_length: Some(data.len() as i64),
|
||||
acl: if self.store_public { Some("public-read".to_string()) } else { None },
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
self.client.put_object(request).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn fetch(
|
||||
&self,
|
||||
bucket_id: u32,
|
||||
image_id: Uuid,
|
||||
kind: ImageKind,
|
||||
sizing_id: u32,
|
||||
) -> anyhow::Result<Option<Bytes>> {
|
||||
let store_in = self.format_path(bucket_id, sizing_id, image_id, kind);
|
||||
|
||||
debug!("Retrieving image in bucket @ {}", &store_in);
|
||||
let request = GetObjectRequest {
|
||||
key: store_in,
|
||||
bucket: self.bucket_name.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
let res = self.client.get_object(request).await?;
|
||||
let content_length = res.content_length.unwrap_or(0) as usize;
|
||||
|
||||
if let Some(body) = res.body {
|
||||
let mut buffer = Vec::with_capacity(content_length);
|
||||
body
|
||||
.into_async_read()
|
||||
.read_to_end(&mut buffer)
|
||||
.await?;
|
||||
|
||||
Ok(Some(buffer.into()))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete(
|
||||
&self,
|
||||
bucket_id: u32,
|
||||
image_id: Uuid,
|
||||
) -> anyhow::Result<Vec<(u32, ImageKind)>> {
|
||||
let bucket = get_bucket_by_id(bucket_id)
|
||||
.ok_or_else(|| anyhow!("Bucket does not exist."))?
|
||||
.cfg();
|
||||
|
||||
let mut hit_entries = vec![];
|
||||
for sizing_id in bucket.sizing_preset_ids().iter().copied() {
|
||||
for kind in ImageKind::variants() {
|
||||
let store_in = self.format_path(bucket_id, sizing_id, image_id, *kind);
|
||||
|
||||
debug!("Purging file in bucket @ {}", &store_in);
|
||||
let request = DeleteObjectRequest {
|
||||
bucket: self.bucket_name.clone(),
|
||||
key: store_in,
|
||||
..Default::default()
|
||||
};
|
||||
self.client.delete_object(request).await?;
|
||||
hit_entries.push((sizing_id, *kind));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(hit_entries)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,105 @@
|
|||
use std::io::ErrorKind;
|
||||
use std::path::PathBuf;
|
||||
use anyhow::anyhow;
|
||||
use async_trait::async_trait;
|
||||
use bytes::Bytes;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::config::ImageKind;
|
||||
use crate::controller::get_bucket_by_id;
|
||||
use crate::StorageBackend;
|
||||
|
||||
pub struct FileSystemBackend {
|
||||
directory: PathBuf,
|
||||
}
|
||||
|
||||
impl FileSystemBackend {
|
||||
pub fn new(dir: PathBuf) -> Self {
|
||||
Self {
|
||||
directory: dir,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn format_path(&self, bucket_id: u32, sizing_id: u32) -> PathBuf {
|
||||
self.directory
|
||||
.join(bucket_id.to_string())
|
||||
.join(sizing_id.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl StorageBackend for FileSystemBackend {
|
||||
async fn store(
|
||||
&self,
|
||||
bucket_id: u32,
|
||||
image_id: Uuid,
|
||||
kind: ImageKind,
|
||||
sizing_id: u32,
|
||||
data: Bytes,
|
||||
) -> anyhow::Result<()> {
|
||||
let store_in = self.format_path(bucket_id, sizing_id);
|
||||
let path = store_in.join(format!("{}.{}", image_id, kind.as_file_extension()));
|
||||
|
||||
debug!("Storing image @ {:?}", &path);
|
||||
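// Optimistically write first; only on the first write into a missing
// bucket/preset directory does the NotFound arm below create it and retry.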
match tokio::fs::write(&path, &data).await {
|
||||
Ok(()) => Ok(()),
|
||||
Err(ref e) if e.kind() == ErrorKind::NotFound => {
|
||||
tokio::fs::create_dir_all(store_in).await?;
|
||||
tokio::fs::write(&path, data).await?;
|
||||
Ok(())
|
||||
},
|
||||
Err(other) => Err(other.into())
|
||||
}
|
||||
}
|
||||
|
||||
async fn fetch(
|
||||
&self,
|
||||
bucket_id: u32,
|
||||
image_id: Uuid,
|
||||
kind: ImageKind,
|
||||
sizing_id: u32,
|
||||
) -> anyhow::Result<Option<Bytes>> {
|
||||
let store_in = self.format_path(bucket_id, sizing_id);
|
||||
let path = store_in.join(format!("{}.{}", image_id, kind.as_file_extension()));
|
||||
|
||||
debug!("Retrieving image @ {:?}", &path);
|
||||
match tokio::fs::read(&path).await {
|
||||
Ok(data) => Ok(Some(Bytes::from(data))),
|
||||
Err(ref e) if e.kind() == ErrorKind::NotFound => Ok(None),
|
||||
Err(other) => Err(other.into()),
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete(
|
||||
&self,
|
||||
bucket_id: u32,
|
||||
image_id: Uuid,
|
||||
) -> anyhow::Result<Vec<(u32, ImageKind)>> {
|
||||
let bucket = get_bucket_by_id(bucket_id)
|
||||
.ok_or_else(|| anyhow!("Bucket does not exist."))?
|
||||
.cfg();
|
||||
|
||||
let mut hit_entries = vec![];
|
||||
for sizing_id in bucket.sizing_preset_ids().iter().copied() {
|
||||
for kind in ImageKind::variants() {
|
||||
let store_in = self.format_path(bucket_id, sizing_id);
|
||||
let path = store_in.join(format!("{}.{}", image_id, kind.as_file_extension()));
|
||||
debug!("Purging image @ {:?}", &path);
|
||||
|
||||
match tokio::fs::remove_file(&path).await {
|
||||
Ok(()) => {
|
||||
hit_entries.push((sizing_id, *kind));
|
||||
},
|
||||
Err(ref e) if e.kind() == ErrorKind::NotFound => continue,
|
||||
Err(other) => return Err(other.into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(hit_entries)
|
||||
}
|
||||
}
|
||||
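For orientation, a standalone sketch of the on-disk layout the backend above produces; the bucket/sizing ids and the UUID are illustrative values, not ones the server would necessarily generate:

use std::path::PathBuf;

fn main() {
    // Mirrors format_path() plus the file-name join used in store/fetch:
    // <directory>/<bucket_id>/<sizing_id>/<image_id>.<extension>
    let path = PathBuf::from("data")
        .join(42u32.to_string()) // bucket_id
        .join(0u32.to_string())  // sizing_id
        .join(format!("{}.{}", "9b1deb4d-3b7d-4bad-9bdd-2b0d7b3dcb6d", "webp"));
    assert_eq!(path, PathBuf::from("data/42/0/9b1deb4d-3b7d-4bad-9bdd-2b0d7b3dcb6d.webp"));
}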
|
||||
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
mod register;
|
||||
mod filesystem;
|
||||
mod blob_storage;
|
||||
mod scylladb;
|
||||
|
||||
pub use register::BackendConfigs;
|
|
@ -0,0 +1,77 @@
|
|||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use serde::Deserialize;
|
||||
|
||||
use crate::StorageBackend;
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum BackendConfigs {
|
||||
Scylla {
|
||||
nodes: Vec<String>,
|
||||
username: Option<String>,
|
||||
password: Option<String>,
|
||||
keyspace: String,
|
||||
table: Option<String>,
|
||||
},
|
||||
FileSystem {
|
||||
/// The base output directory to store files.
|
||||
directory: PathBuf,
|
||||
},
|
||||
BlobStorage {
|
||||
/// The name of the bucket.
|
||||
name: String,
|
||||
|
||||
/// The region of the bucket.
|
||||
region: String,
|
||||
|
||||
/// The bucket endpoint.
|
||||
endpoint: String,
|
||||
|
||||
/// Store objects with the `public-read` acl.
|
||||
#[serde(default)]
|
||||
store_public: bool,
|
||||
}
|
||||
}
|
||||
|
||||
impl BackendConfigs {
|
||||
pub async fn connect(&self) -> anyhow::Result<Arc<dyn StorageBackend>> {
|
||||
match self {
|
||||
Self::FileSystem { directory } => {
|
||||
Ok(Arc::new(super::filesystem::FileSystemBackend::new(directory.clone())))
|
||||
},
|
||||
Self::BlobStorage {
|
||||
name,
|
||||
region,
|
||||
endpoint,
|
||||
store_public,
|
||||
} => {
|
||||
let backend = super::blob_storage::BlobStorageBackend::new(
|
||||
name.to_string(),
|
||||
region.to_string(),
|
||||
endpoint.to_string(),
|
||||
*store_public,
|
||||
)?;
|
||||
|
||||
Ok(Arc::new(backend))
|
||||
},
|
||||
Self::Scylla {
|
||||
nodes,
|
||||
username,
|
||||
password,
|
||||
keyspace,
|
||||
table,
|
||||
} => {
|
||||
let backend = super::scylladb::ScyllaBackend::connect(
|
||||
keyspace.clone(),
|
||||
table.clone(),
|
||||
nodes,
|
||||
username.clone(),
|
||||
password.clone(),
|
||||
).await?;
|
||||
|
||||
Ok(Arc::new(backend))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
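As a sanity check, a minimal sketch (not part of the diff; assumes the `serde_yaml` dependency from Cargo.toml and an async, `anyhow`-returning context) of how one of these config blocks becomes a live backend:

// `#[serde(rename_all = "lowercase")]` plus external enum tagging means a
// YAML mapping keyed by "filesystem" deserializes into BackendConfigs::FileSystem.
let raw = "filesystem:\n  directory: \"data\"\n";
let cfg: BackendConfigs = serde_yaml::from_str(raw)?;
let backend = cfg.connect().await?; // -> Arc<dyn StorageBackend>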
|
@ -0,0 +1,167 @@
|
|||
use anyhow::anyhow;
|
||||
use bytes::Bytes;
|
||||
use uuid::Uuid;
|
||||
use async_trait::async_trait;
|
||||
use scylla::IntoTypedRows;
|
||||
use crate::config::ImageKind;
|
||||
use crate::controller::get_bucket_by_id;
|
||||
use crate::StorageBackend;
|
||||
|
||||
|
||||
pub struct ScyllaBackend {
|
||||
table: String,
|
||||
connection: session::Session,
|
||||
}
|
||||
|
||||
impl ScyllaBackend {
|
||||
pub async fn connect(
|
||||
keyspace: String,
|
||||
table: Option<String>,
|
||||
known_nodes: &[String],
|
||||
user: Option<String>,
|
||||
password: Option<String>,
|
||||
) -> anyhow::Result<Self> {
|
||||
let mut cfg = scylla::SessionConfig::new();
|
||||
cfg.add_known_nodes(known_nodes);
|
||||
cfg.auth_username = user;
|
||||
cfg.auth_password = password;
|
||||
|
||||
let base = scylla::Session::connect(cfg).await?;
|
||||
base.use_keyspace(keyspace, false).await?;
|
||||
|
||||
let connection = session::Session::from(base);
|
||||
|
||||
let table = table.unwrap_or_else(|| "lust_image".to_string());
|
||||
let qry = format!("CREATE TABLE IF NOT EXISTS {} (\
|
||||
bucket_id bigint, \
|
||||
sizing_id bigint, \
|
||||
image_id uuid, \
|
||||
kind text, \
|
||||
data blob, \
|
||||
PRIMARY KEY ((bucket_id, sizing_id, image_id, kind)) \
|
||||
)", table);
|
||||
connection.query(&qry, &[]).await?;
|
||||
|
||||
Ok(Self {
|
||||
table,
|
||||
connection
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl StorageBackend for ScyllaBackend {
|
||||
async fn store(&self, bucket_id: u32, image_id: Uuid, kind: ImageKind, sizing_id: u32, data: Bytes) -> anyhow::Result<()> {
|
||||
let qry = format!("INSERT INTO {table} (bucket_id, sizing_id, image_id, kind, data) VALUES (?, ?, ?, ?, ?);", table = self.table);
|
||||
|
||||
self.connection
|
||||
.query_prepared(&qry, (bucket_id as i64, sizing_id as i64, image_id, kind.as_file_extension(), data.to_vec()))
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn fetch(&self, bucket_id: u32, image_id: Uuid, kind: ImageKind, sizing_id: u32) -> anyhow::Result<Option<Bytes>> {
|
||||
let qry = format!("SELECT data FROM {table} WHERE bucket_id = ? AND image_id = ? AND kind = ? AND sizing_id = ?;", table = self.table);
|
||||
|
||||
let buff = self.connection
|
||||
.query_prepared(&qry, (bucket_id as i64, image_id, kind.as_file_extension(), sizing_id as i64))
|
||||
.await?
|
||||
.rows
|
||||
.unwrap_or_default()
|
||||
.into_typed::<(Vec<u8>,)>()
|
||||
.next()
|
||||
.transpose()?
|
||||
.map(|v| Bytes::from(v.0));
|
||||
|
||||
Ok(buff)
|
||||
}
|
||||
|
||||
async fn delete(&self, bucket_id: u32, image_id: Uuid) -> anyhow::Result<Vec<(u32, ImageKind)>> {
|
||||
let qry = format!("DELETE FROM {table} WHERE bucket_id = ? AND image_id = ? AND kind = ? AND sizing_id = ?;", table = self.table);
|
||||
|
||||
let bucket = get_bucket_by_id(bucket_id)
|
||||
.ok_or_else(|| anyhow!("Bucket does not exist."))?
|
||||
.cfg();
|
||||
|
||||
let mut hit_entries = vec![];
|
||||
for sizing_id in bucket.sizing_preset_ids().iter().copied() {
|
||||
for kind in ImageKind::variants() {
|
||||
let values = (bucket_id as i64, image_id, kind.as_file_extension(), sizing_id as i64);
|
||||
debug!("Purging image @ {:?}", &values);
|
||||
|
||||
self.connection
|
||||
.query_prepared(&qry, values)
|
||||
.await?;
|
||||
|
||||
hit_entries.push((sizing_id, *kind))
|
||||
}
|
||||
}
|
||||
|
||||
Ok(hit_entries)
|
||||
}
|
||||
}
|
||||
|
||||
mod session {
|
||||
use std::fmt::Debug;
|
||||
use scylla::frame::value::ValueList;
|
||||
use scylla::query::Query;
|
||||
use scylla::transport::errors::{DbError, QueryError};
|
||||
use scylla::QueryResult;
|
||||
|
||||
pub struct Session(scylla::CachingSession);
|
||||
|
||||
impl From<scylla::Session> for Session {
|
||||
fn from(s: scylla::Session) -> Self {
|
||||
Self(scylla::CachingSession::from(s, 100))
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<scylla::Session> for Session {
|
||||
fn as_ref(&self) -> &scylla::Session {
|
||||
&self.0.session
|
||||
}
|
||||
}
|
||||
|
||||
impl Session {
|
||||
#[instrument(skip(self, query), level = "debug")]
|
||||
pub async fn query(
|
||||
&self,
|
||||
query: &str,
|
||||
values: impl ValueList + Debug,
|
||||
) -> Result<QueryResult, QueryError> {
|
||||
debug!("executing query {}", query);
|
||||
let result = self.0.execute(query, &values).await;
|
||||
|
||||
if let Err(ref e) = result {
|
||||
consider_logging_error(e);
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
#[instrument(skip(self, query), level = "debug")]
|
||||
pub async fn query_prepared(
|
||||
&self,
|
||||
query: &str,
|
||||
values: impl ValueList + Debug,
|
||||
) -> Result<QueryResult, QueryError> {
|
||||
debug!("preparing new statement: {}", query);
|
||||
let result = self.0.execute(Query::from(query), &values).await;
|
||||
|
||||
match result {
|
||||
Ok(res) => Ok(res),
|
||||
Err(e) => {
|
||||
consider_logging_error(&e);
|
||||
Err(e)
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn consider_logging_error(e: &QueryError) {
|
||||
if let QueryError::DbError(DbError::AlreadyExists { .. }, ..) = e {
|
||||
info!("Keyspace already exists, skipping...");
|
||||
}
|
||||
}
|
||||
}
|
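For completeness, a hedged sketch of constructing this backend directly (node address and keyspace are illustrative; in the server this is driven by `BackendConfigs::connect`):

let backend = ScyllaBackend::connect(
    "lust".to_string(),              // keyspace (must already exist)
    None,                            // table: defaults to "lust_image"
    &["127.0.0.1:9042".to_string()], // known_nodes
    None,                            // username
    None,                            // password
).await?;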
|
@ -0,0 +1,2 @@
|
|||
pub mod backends;
|
||||
pub mod template;
|
|
@ -0,0 +1,30 @@
|
|||
use async_trait::async_trait;
|
||||
use bytes::Bytes;
|
||||
use uuid::Uuid;
|
||||
use crate::config::ImageKind;
|
||||
|
||||
#[async_trait]
|
||||
pub trait StorageBackend: Sync + Send + 'static {
|
||||
async fn store(
|
||||
&self,
|
||||
bucket_id: u32,
|
||||
image_id: Uuid,
|
||||
kind: ImageKind,
|
||||
sizing_id: u32,
|
||||
data: Bytes,
|
||||
) -> anyhow::Result<()>;
|
||||
|
||||
async fn fetch(
|
||||
&self,
|
||||
bucket_id: u32,
|
||||
image_id: Uuid,
|
||||
kind: ImageKind,
|
||||
sizing_id: u32,
|
||||
) -> anyhow::Result<Option<Bytes>>;
|
||||
|
||||
async fn delete(
|
||||
&self,
|
||||
bucket_id: u32,
|
||||
image_id: Uuid,
|
||||
) -> anyhow::Result<Vec<(u32, ImageKind)>>;
|
||||
}
|
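A short usage sketch of the trait object the server passes around; the `ImageKind::Webp` variant and the literal ids are assumptions for illustration:

async fn roundtrip(backend: &dyn StorageBackend) -> anyhow::Result<()> {
    let image_id = Uuid::new_v4();
    let data = Bytes::from_static(b"...encoded image bytes...");
    backend.store(1, image_id, ImageKind::Webp, 0, data.clone()).await?;
    let fetched = backend.fetch(1, image_id, ImageKind::Webp, 0).await?;
    assert_eq!(fetched, Some(data));
    Ok(())
}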
|
@ -0,0 +1,308 @@
|
|||
use std::sync::Arc;
|
||||
use image::load_from_memory_with_format;
|
||||
use poem::Route;
|
||||
use poem::http::StatusCode;
|
||||
use poem_openapi::OpenApiService;
|
||||
use poem::test::{TestClient, TestResponse};
|
||||
use poem::web::headers;
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use crate::{BucketController, cache, config, controller, StorageBackend};
|
||||
|
||||
const JIT_CONFIG: &str = include_str!("../tests/configs/jit-mode.yaml");
|
||||
const AOT_CONFIG: &str = include_str!("../tests/configs/aot-mode.yaml");
|
||||
const REALTIME_CONFIG: &str = include_str!("../tests/configs/realtime-mode.yaml");
|
||||
const TEST_IMAGE: &[u8] = include_bytes!("../examples/example.jpeg");
|
||||
|
||||
async fn setup_environment(cfg: &str) -> anyhow::Result<TestClient<Route>> {
|
||||
config::init_test(cfg)?;
|
||||
|
||||
let global_limiter = config::config()
|
||||
.max_concurrency
|
||||
.map(Semaphore::new)
|
||||
.map(Arc::new);
|
||||
|
||||
let storage: Arc<dyn StorageBackend> = config::config()
|
||||
.backend
|
||||
.connect()
|
||||
.await?;
|
||||
|
||||
let buckets = config::config()
|
||||
.buckets
|
||||
.iter()
|
||||
.map(|(bucket, cfg)| {
|
||||
let bucket_id = crate::utils::crc_hash(bucket);
|
||||
let pipeline = cfg.mode.build_pipeline(cfg);
|
||||
let cache = cfg.cache
|
||||
.map(cache::new_cache)
|
||||
.transpose()?
|
||||
.flatten();
|
||||
|
||||
let controller = BucketController::new(
|
||||
bucket_id,
|
||||
cache,
|
||||
global_limiter.clone(),
|
||||
cfg.clone(),
|
||||
pipeline,
|
||||
storage.clone(),
|
||||
);
|
||||
Ok::<_, anyhow::Error>((bucket_id, controller))
|
||||
})
|
||||
.collect::<Result<hashbrown::HashMap<_, _>, anyhow::Error>>()?;
|
||||
|
||||
controller::init_buckets(buckets);
|
||||
|
||||
let app = OpenApiService::new(
|
||||
crate::routes::LustApi,
|
||||
"Lust API",
|
||||
env!("CARGO_PKG_VERSION")
|
||||
);
|
||||
|
||||
let app = Route::new().nest("/v1", app);
|
||||
Ok(TestClient::new(app))
|
||||
}
|
||||
|
||||
|
||||
async fn validate_image_content(
|
||||
res: TestResponse,
|
||||
expected_format: image::ImageFormat,
|
||||
) -> anyhow::Result<()> {
|
||||
let body = res.0.into_body().into_bytes().await?;
|
||||
|
||||
load_from_memory_with_format(&body, expected_format)
|
||||
.expect("Invalid image returned for expected format");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_basic_aot_upload_retrieval_without_guessing() -> anyhow::Result<()> {
|
||||
let app = setup_environment(AOT_CONFIG).await?;
|
||||
|
||||
let res = app.post("/v1/user-profiles")
|
||||
.body(TEST_IMAGE)
|
||||
.content_type("application/octet-stream".to_string())
|
||||
.typed_header(headers::ContentLength(TEST_IMAGE.len() as u64))
|
||||
.query("format".to_string(), &"jpeg".to_string())
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
let info = res.json().await;
|
||||
|
||||
let file_id = info
|
||||
.value()
|
||||
.object()
|
||||
.get("image_id")
|
||||
.string();
|
||||
|
||||
let res = app.get(format!("/v1/user-profiles/{}", file_id))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
res.assert_content_type(&"image/webp".to_string());
|
||||
|
||||
validate_image_content(res, image::ImageFormat::WebP).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_basic_aot_upload_retrieval_with_guessing() -> anyhow::Result<()> {
|
||||
let app = setup_environment(AOT_CONFIG).await?;
|
||||
|
||||
let res = app.post("/v1/user-profiles")
|
||||
.body(TEST_IMAGE)
|
||||
.content_type("application/octet-stream".to_string())
|
||||
.typed_header(headers::ContentLength(TEST_IMAGE.len() as u64))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
let info = res.json().await;
|
||||
|
||||
let file_id = info
|
||||
.value()
|
||||
.object()
|
||||
.get("image_id")
|
||||
.string();
|
||||
|
||||
let res = app.get(format!("/v1/user-profiles/{}", file_id))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
res.assert_content_type(&"image/webp".to_string());
|
||||
|
||||
validate_image_content(res, image::ImageFormat::WebP).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_basic_jit_upload_retrieval() -> anyhow::Result<()> {
|
||||
let app = setup_environment(JIT_CONFIG).await?;
|
||||
|
||||
let res = app.post("/v1/user-profiles")
|
||||
.body(TEST_IMAGE)
|
||||
.content_type("application/octet-stream".to_string())
|
||||
.typed_header(headers::ContentLength(TEST_IMAGE.len() as u64))
|
||||
.query("format".to_string(), &"jpeg".to_string())
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
let info = res.json().await;
|
||||
|
||||
let file_id = info
|
||||
.value()
|
||||
.object()
|
||||
.get("image_id")
|
||||
.string();
|
||||
|
||||
let res = app.get(format!("/v1/user-profiles/{}", file_id))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
res.assert_content_type(&"image/jpeg".to_string());
|
||||
|
||||
validate_image_content(res, image::ImageFormat::Jpeg).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_jit_upload_custom_format_retrieval() -> anyhow::Result<()> {
|
||||
let app = setup_environment(JIT_CONFIG).await?;
|
||||
|
||||
let res = app.post("/v1/user-profiles")
|
||||
.body(TEST_IMAGE)
|
||||
.content_type("application/octet-stream".to_string())
|
||||
.typed_header(headers::ContentLength(TEST_IMAGE.len() as u64))
|
||||
.query("format".to_string(), &"jpeg".to_string())
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
let info = res.json().await;
|
||||
|
||||
let file_id = info
|
||||
.value()
|
||||
.object()
|
||||
.get("image_id")
|
||||
.string();
|
||||
|
||||
let res = app.get(format!("/v1/user-profiles/{}", file_id))
|
||||
.query("format", &"png".to_string())
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
res.assert_content_type(&"image/png".to_string());
|
||||
|
||||
validate_image_content(res, image::ImageFormat::Png).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_basic_realtime_upload_retrieval() -> anyhow::Result<()> {
|
||||
let app = setup_environment(REALTIME_CONFIG).await?;
|
||||
|
||||
let res = app.post("/v1/user-profiles")
|
||||
.body(TEST_IMAGE)
|
||||
.content_type("application/octet-stream".to_string())
|
||||
.typed_header(headers::ContentLength(TEST_IMAGE.len() as u64))
|
||||
.query("format".to_string(), &"jpeg".to_string())
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
let info = res.json().await;
|
||||
|
||||
let file_id = info
|
||||
.value()
|
||||
.object()
|
||||
.get("image_id")
|
||||
.string();
|
||||
|
||||
let res = app.get(format!("/v1/user-profiles/{}", file_id))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
res.assert_content_type(&"image/png".to_string());
|
||||
|
||||
validate_image_content(res, image::ImageFormat::Png).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_realtime_resizing() -> anyhow::Result<()> {
|
||||
let app = setup_environment(REALTIME_CONFIG).await?;
|
||||
|
||||
let res = app.post("/v1/user-profiles")
|
||||
.body(TEST_IMAGE)
|
||||
.content_type("application/octet-stream".to_string())
|
||||
.typed_header(headers::ContentLength(TEST_IMAGE.len() as u64))
|
||||
.query("format".to_string(), &"jpeg".to_string())
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
let info = res.json().await;
|
||||
|
||||
let file_id = info
|
||||
.value()
|
||||
.object()
|
||||
.get("image_id")
|
||||
.string();
|
||||
|
||||
let res = app.get(format!("/v1/user-profiles/{}", file_id))
|
||||
.query("width".to_string(), &"500".to_string())
|
||||
.query("height".to_string(), &"500".to_string())
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
res.assert_content_type(&"image/png".to_string());
|
||||
|
||||
validate_image_content(res, image::ImageFormat::Png).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_realtime_resizing_expect_err() -> anyhow::Result<()> {
|
||||
let app = setup_environment(REALTIME_CONFIG).await?;
|
||||
|
||||
let res = app.post("/v1/user-profiles")
|
||||
.body(TEST_IMAGE)
|
||||
.content_type("application/octet-stream".to_string())
|
||||
.typed_header(headers::ContentLength(TEST_IMAGE.len() as u64))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::OK);
|
||||
let info = res.json().await;
|
||||
|
||||
let file_id = info
|
||||
.value()
|
||||
.object()
|
||||
.get("image_id")
|
||||
.string();
|
||||
|
||||
let res = app.get(format!("/v1/user-profiles/{}", file_id))
|
||||
.query("width".to_string(), &"500".to_string())
|
||||
.send()
|
||||
.await;
|
||||
|
||||
res.assert_status(StatusCode::BAD_REQUEST);
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -1,34 +0,0 @@
|
|||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use bytes::BytesMut;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::context::{FilterType, IndexResult, OrderBy};
|
||||
use crate::image::{ImageFormat, ImagePresetsData};
|
||||
|
||||
#[async_trait]
|
||||
pub trait DatabaseLinker {
|
||||
async fn ensure_tables(&mut self, presets: Vec<&str>, columns: Vec<ImageFormat>) -> Result<()>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait ImageStore {
|
||||
async fn get_image(
|
||||
&self,
|
||||
file_id: Uuid,
|
||||
preset: String,
|
||||
category: &str,
|
||||
format: ImageFormat,
|
||||
) -> Option<BytesMut>;
|
||||
|
||||
async fn add_image(&self, file_id: Uuid, category: &str, data: ImagePresetsData) -> Result<()>;
|
||||
|
||||
async fn remove_image(&self, file_id: Uuid, presets: Vec<&String>) -> Result<()>;
|
||||
|
||||
async fn list_entities(
|
||||
&self,
|
||||
filter: FilterType,
|
||||
order: OrderBy,
|
||||
page: usize,
|
||||
) -> Result<Vec<IndexResult>>;
|
||||
}
|
|
@ -0,0 +1,7 @@
|
|||
use std::hash::Hash;
|
||||
|
||||
pub fn crc_hash<H: Hash>(v: H) -> u32 {
|
||||
let mut hasher = crc32fast::Hasher::default();
|
||||
v.hash(&mut hasher);
|
||||
hasher.finalize()
|
||||
}
|
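The bucket routing elsewhere in the diff (`crate::utils::crc_hash(bucket)`) leans on this being stable and seedless; a quick sketch:

// Same name -> same u32 id across restarts and nodes, so bucket ids can
// safely appear in storage paths and database keys.
let id = crc_hash("user-profiles");
assert_eq!(id, crc_hash("user-profiles"));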
|
@ -1,11 +0,0 @@
|
|||
version: '3'
|
||||
|
||||
services:
|
||||
keydb:
|
||||
image: eqalpha/keydb:latest
|
||||
container_name: some-keydb
|
||||
ports:
|
||||
- "6379:6379"
|
||||
volumes:
|
||||
- ./storage:/data
|
||||
- ./keydb.conf:/etc/keydb/keydb.conf
|
|
@ -1,13 +0,0 @@
|
|||
version: '3'
|
||||
|
||||
services:
|
||||
mariadb:
|
||||
image: mariadb:latest
|
||||
container_name: some-maria
|
||||
ports:
|
||||
- "3306:3306"
|
||||
environment:
|
||||
- MARIADB_ROOT_PASSWORD=admin
|
||||
- MARIADB_DATABASE=maria
|
||||
volumes:
|
||||
- ./storage:/var/lib/mysql
|
|
@ -1,24 +0,0 @@
|
|||
version: '3'
|
||||
|
||||
services:
|
||||
some-scylla:
|
||||
image: scylladb/scylla
|
||||
container_name: some-scylla
|
||||
volumes:
|
||||
- ./storage/n1:/var/lib/scylla
|
||||
ports:
|
||||
- "9042:9042"
|
||||
|
||||
some-scylla2:
|
||||
image: scylladb/scylla
|
||||
container_name: some-scylla2
|
||||
command: --seeds=some-scylla
|
||||
volumes:
|
||||
- ./storage/n2:/var/lib/scylla
|
||||
|
||||
some-scylla3:
|
||||
image: scylladb/scylla
|
||||
container_name: some-scylla3
|
||||
command: --seeds=some-scylla
|
||||
volumes:
|
||||
- ./storage/n3:/var/lib/scylla
|
|
@ -0,0 +1,32 @@
|
|||
backend:
|
||||
filesystem: # Use the filesystem backend.
|
||||
directory: "data"
|
||||
|
||||
global_cache:
|
||||
max_images: 1000 # At most cache 1000 images.
|
||||
max_capacity: 500 # 500MB max capacity.
|
||||
|
||||
buckets:
|
||||
user-profiles: # Define a bucket called "user-profiles"; it is accessible at `/images/user-profiles`.
|
||||
mode: aot # Optimise images ahead of time: generate and store every enabled variant at upload.
|
||||
formats:
|
||||
png: true # Enable PNG encoding.
|
||||
jpeg: true # Enable JPEG encoding.
|
||||
webp: true # Enable WebP encoding.
|
||||
gif: false # Disable GIF encoding.
|
||||
|
||||
webp_config:
|
||||
quality: 80 # Set lossy quality to 80%
|
||||
method: 4 # Favour performance slightly over quality.
|
||||
threading: true # Enable multi-threaded encoding.
|
||||
|
||||
default_serving_format: webp # Serve the WebP format by default.
|
||||
default_serving_preset: medium-square # Use the "medium-square" sizing preset by default.
|
||||
|
||||
presets:
|
||||
medium-square: # Define a new resizing preset.
|
||||
width: 500 # 500px
|
||||
height: 500 # 500px
|
||||
|
||||
cache: null # Use the global cache handler.
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
backend:
|
||||
filesystem: # Use the filesystem backend.
|
||||
directory: "data"
|
||||
|
||||
global_cache:
|
||||
max_images: 1000 # At most cache 1000 images.
|
||||
max_capacity: 500 # 500MB max capacity.
|
||||
|
||||
buckets:
|
||||
user-profiles: # Define a bucket called "user-profiles"; it is accessible at `/images/user-profiles`.
|
||||
mode: jit # Optimise images as and when they're requested, then store the results.
|
||||
formats:
|
||||
png: true # Enable PNG encoding.
|
||||
jpeg: true # Enable JPEG encoding.
|
||||
webp: false # Disable WebP encoding.
|
||||
gif: false # Disable GIF encoding.
|
||||
|
||||
webp_config:
|
||||
quality: 80 # Set lossy quality to 80%
|
||||
method: 4 # Favour performance slightly over quality.
|
||||
threading: true # Enable multi-threaded encoding.
|
||||
|
||||
default_serving_format: jpeg # Serve the JPEG format by default.
|
||||
default_serving_preset: medium-square # Use the "medium-square" sizing preset by default.
|
||||
|
||||
presets:
|
||||
medium-square: # Define a new resizing preset.
|
||||
width: 500 # 500px
|
||||
height: 500 # 500px
|
||||
|
||||
cache: null # Use the global cache handler.
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
backend:
|
||||
filesystem: # Use the filesystem backend.
|
||||
directory: "data"
|
||||
|
||||
global_cache:
|
||||
max_images: 1000 # At most cache 1000 images.
|
||||
max_capacity: 500 # 500MB max capacity.
|
||||
|
||||
buckets:
|
||||
user-profiles: # Define a bucket called "user-profiles"; it is accessible at `/images/user-profiles`.
|
||||
mode: realtime # Optimise images on the fly for every request; resized results are not stored.
|
||||
formats:
|
||||
png: true # Enable PNG encoding.
|
||||
jpeg: true # Enable JPEG encoding.
|
||||
webp: true # Enable WebP encoding.
|
||||
gif: false # Disable GIF encoding.
|
||||
|
||||
original_image_store_format: jpeg
|
||||
|
||||
webp_config:
|
||||
quality: 80 # Set lossy quality to 80%
|
||||
method: 4 # Favour performance slightly over quality.
|
||||
threading: true # Enable multi-threaded encoding.
|
||||
|
||||
default_serving_format: png # Serve the PNG format by default.
|
||||
default_serving_preset: medium-square # Use the "medium-square" sizing preset by default.
|
||||
|
||||
presets:
|
||||
medium-square: # Define a new resizing preset.
|
||||
width: 500 # 500px
|
||||
height: 500 # 500px
|
||||
|
||||
cache: null # Use the global cache handler.
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
FROM nginx:latest
|
||||
|
||||
# Remove the default Nginx configuration file
|
||||
RUN rm -v /etc/nginx/nginx.conf
|
||||
|
||||
# Copy a configuration file from the current directory
|
||||
ADD nginx.conf /etc/nginx/
|
||||
|
||||
ADD sample /usr/share/nginx/html/
|
||||
ADD sample /var/www/html/
|
||||
|
||||
# Expose ports
|
||||
EXPOSE 90
|
|
@ -1,61 +0,0 @@
|
|||
user www-data;
|
||||
worker_processes auto;
|
||||
pid /run/nginx.pid;
|
||||
include /etc/nginx/modules-enabled/*.conf;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
##
|
||||
# Basic Settings
|
||||
##
|
||||
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
# server_tokens off;
|
||||
|
||||
# server_names_hash_bucket_size 64;
|
||||
# server_name_in_redirect off;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
##
|
||||
# SSL Settings
|
||||
##
|
||||
|
||||
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
|
||||
ssl_prefer_server_ciphers on;
|
||||
|
||||
##
|
||||
# Logging Settings
|
||||
##
|
||||
|
||||
access_log /var/log/nginx/access.log;
|
||||
error_log /var/log/nginx/error.log;
|
||||
|
||||
##
|
||||
# Gzip Settings
|
||||
##
|
||||
|
||||
gzip on;
|
||||
|
||||
# gzip_vary on;
|
||||
# gzip_proxied any;
|
||||
# gzip_comp_level 6;
|
||||
# gzip_buffers 16 8k;
|
||||
# gzip_http_version 1.1;
|
||||
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
|
||||
|
||||
|
||||
server {
|
||||
root /usr/share/nginx/html/;
|
||||
autoindex on;
|
||||
listen 90;
|
||||
}
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
import base64
|
||||
import aiohttp
|
||||
import asyncio
|
||||
|
||||
queue = asyncio.Queue()
|
||||
|
||||
|
||||
def get_base_data(file: str) -> str:
|
||||
with open(file, "rb") as file:
|
||||
data = file.read()
|
||||
return base64.standard_b64encode(data).decode("utf-8")
|
||||
|
||||
|
||||
async def task():
|
||||
data = get_base_data("./samples/news.png")
|
||||
async with aiohttp.ClientSession() as sess:
|
||||
while not queue.empty():
|
||||
_ = await queue.get()
|
||||
async with sess.post(
|
||||
"http://127.0.0.1:7070/admin/create/image",
|
||||
json={"format": "png", "data": data}
|
||||
) as resp:
|
||||
assert resp.status == 200
|
||||
await asyncio.sleep(0.2)
|
||||
|
||||
|
||||
async def main():
|
||||
for _ in range(200_000):
|
||||
queue.put_nowait(None)
|
||||
|
||||
tasks = [task() for _ in range(1)]
|
||||
t = asyncio.ensure_future(asyncio.gather(*tasks))
|
||||
|
||||
while not queue.empty() and not t.done():
|
||||
print(f"currently, {queue.qsize()} in queue")
|
||||
await asyncio.sleep(1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
asyncio.run(main())
|
tests/unit.py
|
@ -1,109 +0,0 @@
|
|||
import base64
|
||||
import requests
|
||||
import uuid
|
||||
|
||||
working_ids = {}
|
||||
|
||||
|
||||
def get_base_data(file: str) -> str:
|
||||
with open(file, "rb") as file:
|
||||
data = file.read()
|
||||
print(f"original {len(data)}")
|
||||
return base64.standard_b64encode(data).decode("utf-8")
|
||||
|
||||
|
||||
def test_png_upload1():
|
||||
global working_ids
|
||||
data = get_base_data("./samples/sunset.jpeg")
|
||||
payload = {
|
||||
"format": "jpeg",
|
||||
"data": data,
|
||||
}
|
||||
r = requests.post("http://127.0.0.1:7070/admin/create/image", json=payload)
|
||||
data = r.json()
|
||||
|
||||
assert r.status_code == 200
|
||||
assert data['data']['category'] == "default"
|
||||
|
||||
file_id = data['data']['file_id']
|
||||
working_ids['default'] = file_id
|
||||
print(file_id)
|
||||
|
||||
|
||||
def test_get_img_default():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}")
|
||||
assert r.status_code == 200
|
||||
|
||||
|
||||
def test_get_img_preset_webp():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=webp")
|
||||
assert r.status_code == 200
|
||||
|
||||
|
||||
def test_get_img_preset_png():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=png")
|
||||
assert r.status_code == 200
|
||||
|
||||
|
||||
def test_get_img_preset_jpeg():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=jpeg")
|
||||
assert r.status_code == 200
|
||||
|
||||
|
||||
def test_get_img_format_gif():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?format=gif")
|
||||
assert r.status_code == 200
|
||||
|
||||
|
||||
def test_get_img_preset_large():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?preset=large")
|
||||
assert r.status_code == 200
|
||||
|
||||
|
||||
def test_get_img_preset_medium():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?preset=medium")
|
||||
assert r.status_code == 200
|
||||
|
||||
|
||||
def test_get_img_preset_small():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{working_ids['default']}?preset=small")
|
||||
assert r.status_code == 200
|
||||
|
||||
|
||||
def test_get_nothing1():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}")
|
||||
assert r.status_code == 404
|
||||
|
||||
|
||||
def test_get_nothing2():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=png")
|
||||
assert r.status_code == 404
|
||||
|
||||
|
||||
def test_get_nothing3():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=jpeg")
|
||||
assert r.status_code == 404
|
||||
|
||||
|
||||
def test_get_nothing4():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=webp")
|
||||
assert r.status_code == 404
|
||||
|
||||
|
||||
def test_get_nothing5():
|
||||
r = requests.get(f"http://127.0.0.1:7070/images/{uuid.uuid4()}?format=gif")
|
||||
assert r.status_code == 404
|
||||
|
||||
|
||||
def test_remove_img1():
|
||||
r = requests.delete(
|
||||
f"http://127.0.0.1:7070/admin/delete/image/44524a33-c505-476d-b23b-c42de1fd796a")
|
||||
print(r.content)
|
||||
assert r.status_code == 200
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_png_upload1()
|
||||
test_get_img_default()
|
||||
test_get_nothing1()
|
||||
# test_remove_img1()
|
|
@ -8,5 +8,5 @@ edition = "2018"
|
|||
|
||||
[dependencies]
|
||||
libwebp-sys = "0.3.2"
|
||||
image = "0.23"
|
||||
once_cell = "1.8.0"
|
||||
image = "0.24"
|
||||
anyhow = "1"
|
|
@ -1,13 +1,13 @@
|
|||
use std::fmt::{Debug, Error, Formatter};
|
||||
use std::ops::{Deref, DerefMut};
|
||||
|
||||
use image::{DynamicImage, GenericImageView, RgbaImage};
|
||||
use anyhow::{Result, anyhow};
|
||||
use image::{DynamicImage, RgbaImage};
|
||||
use libwebp_sys::WebPEncodingError::VP8_ENC_OK;
|
||||
use libwebp_sys::WebPPreset::WEBP_PRESET_DEFAULT;
|
||||
use libwebp_sys::*;
|
||||
use once_cell::sync::OnceCell;
|
||||
pub use libwebp_sys::WebPConfig;
|
||||
|
||||
static CONFIG: OnceCell<WebPConfig> = OnceCell::new();
|
||||
|
||||
/// Inits the global encoder config.
|
||||
///
|
||||
|
@ -21,8 +21,8 @@ static CONFIG: OnceCell<WebPConfig> = OnceCell::new();
|
|||
///
|
||||
/// - multi_threading:
|
||||
/// Whether the encoder should attempt to use multi-threaded encoding.
|
||||
pub fn init_global(lossless: bool, quality: f32, method: i32, multi_threading: bool) {
|
||||
let cfg = WebPConfig {
|
||||
pub fn config(lossless: bool, quality: f32, method: i32, multi_threading: bool) -> WebPConfig {
|
||||
WebPConfig {
|
||||
lossless: if lossless { 1 } else { 0 },
|
||||
quality,
|
||||
method,
|
||||
|
@ -51,9 +51,7 @@ pub fn init_global(lossless: bool, quality: f32, method: i32, multi_threading: b
|
|||
use_delta_palette: 0,
|
||||
use_sharp_yuv: 0,
|
||||
pad: [100, 100],
|
||||
};
|
||||
|
||||
let _ = CONFIG.set(cfg);
|
||||
}
|
||||
}
|
||||
|
||||
/// Picture is uninitialized.
|
||||
|
@ -118,6 +116,7 @@ pub enum PixelLayout {
|
|||
}
|
||||
|
||||
pub struct Encoder<'a> {
|
||||
cfg: WebPConfig,
|
||||
layout: PixelLayout,
|
||||
image: &'a [u8],
|
||||
width: u32,
|
||||
|
@ -126,30 +125,25 @@ pub struct Encoder<'a> {
|
|||
|
||||
impl<'a> Encoder<'a> {
|
||||
/// Creates a new encoder from the given image.
|
||||
pub fn from_image(image: &'a DynamicImage) -> Self {
|
||||
pub fn from_image(cfg: WebPConfig, image: &'a DynamicImage) -> Self {
|
||||
match image {
|
||||
DynamicImage::ImageRgb8(image) => {
|
||||
Self::from_rgb(image.as_ref(), image.width(), image.height())
|
||||
Self::from_rgb(cfg, image.as_ref(), image.width(), image.height())
|
||||
},
|
||||
DynamicImage::ImageRgba8(image) => {
|
||||
Self::from_rgba(image.as_ref(), image.width(), image.height())
|
||||
},
|
||||
DynamicImage::ImageBgr8(image) => {
|
||||
Self::from_bgr(image.as_ref(), image.width(), image.height())
|
||||
},
|
||||
DynamicImage::ImageBgra8(image) => {
|
||||
Self::from_bgra(image.as_ref(), image.width(), image.height())
|
||||
Self::from_rgba(cfg, image.as_ref(), image.width(), image.height())
|
||||
},
|
||||
other => {
|
||||
let image = other.to_rgba8();
|
||||
Self::from_other(other.as_bytes(), other.width(), other.height(), image)
|
||||
Self::from_other(cfg, other.as_bytes(), other.width(), other.height(), image)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new encoder from the given image data in the RGB pixel layout.
|
||||
pub fn from_rgb(image: &'a [u8], width: u32, height: u32) -> Self {
|
||||
pub fn from_rgb(cfg: WebPConfig, image: &'a [u8], width: u32, height: u32) -> Self {
|
||||
Self {
|
||||
cfg,
|
||||
image,
|
||||
width,
|
||||
height,
|
||||
|
@ -158,8 +152,9 @@ impl<'a> Encoder<'a> {
|
|||
}
|
||||
|
||||
/// Creates a new encoder from the given image data in the RGBA pixel layout.
|
||||
pub fn from_rgba(image: &'a [u8], width: u32, height: u32) -> Self {
|
||||
pub fn from_rgba(cfg: WebPConfig, image: &'a [u8], width: u32, height: u32) -> Self {
|
||||
Self {
|
||||
cfg,
|
||||
image,
|
||||
width,
|
||||
height,
|
||||
|
@ -167,30 +162,11 @@ impl<'a> Encoder<'a> {
|
|||
}
|
||||
}
|
||||
|
||||
/// Creates a new encoder from the given image data in the BGR pixel layout.
|
||||
pub fn from_bgr(image: &'a [u8], width: u32, height: u32) -> Self {
|
||||
Self {
|
||||
image,
|
||||
width,
|
||||
height,
|
||||
layout: PixelLayout::BGR,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new encoder from the given image data in the BGRA pixel layout.
|
||||
pub fn from_bgra(image: &'a [u8], width: u32, height: u32) -> Self {
|
||||
Self {
|
||||
image,
|
||||
width,
|
||||
height,
|
||||
layout: PixelLayout::BGRA,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new encoder from the given image data in the Other layout,
|
||||
/// this creates a copy of the data to convert it to RGBA.
|
||||
pub fn from_other(image: &'a [u8], width: u32, height: u32, other: RgbaImage) -> Self {
|
||||
pub fn from_other(cfg: WebPConfig, image: &'a [u8], width: u32, height: u32, other: RgbaImage) -> Self {
|
||||
Self {
|
||||
cfg,
|
||||
image,
|
||||
width,
|
||||
height,
|
||||
|
@ -199,28 +175,26 @@ impl<'a> Encoder<'a> {
|
|||
}
|
||||
|
||||
/// Encode the image with the given global config.
|
||||
pub fn encode(&self) -> WebPMemory {
|
||||
pub fn encode(self) -> Result<WebPMemory> {
|
||||
let (img, layout) = if let PixelLayout::Other(img) = &self.layout {
|
||||
(img.as_ref(), &PixelLayout::RGBA)
|
||||
} else {
|
||||
(self.image.as_ref(), &self.layout)
|
||||
};
|
||||
|
||||
unsafe { encode(img, layout, self.width, self.height) }
|
||||
unsafe { encode(self.cfg, img, layout, self.width, self.height) }
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! check_ok {
|
||||
( $e:expr, $msg:expr ) => {{
|
||||
if $e == 0 {
|
||||
panic!("{}", $msg);
|
||||
return Err(anyhow!("{}", $msg));
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
unsafe fn encode(image: &[u8], layout: &PixelLayout, width: u32, height: u32) -> WebPMemory {
|
||||
let cfg = CONFIG.get().expect("config un-initialised.").clone();
|
||||
|
||||
unsafe fn encode(cfg: WebPConfig, image: &[u8], layout: &PixelLayout, width: u32, height: u32) -> Result<WebPMemory> {
|
||||
let picture = empty_webp_picture();
|
||||
let writer = WebPMemoryWriter {
|
||||
mem: std::ptr::null_mut::<u8>(),
|
||||
|
@ -283,13 +257,13 @@ unsafe fn encode(image: &[u8], layout: &PixelLayout, width: u32, height: u32) ->
|
|||
WebPPictureFree(picture_ptr);
|
||||
if ok == 0 {
|
||||
WebPMemoryWriterClear(writer_ptr);
|
||||
panic!(
|
||||
return Err(anyhow!(
|
||||
"memory error. libwebp error code: {:?}",
|
||||
(*picture_ptr).error_code
|
||||
)
|
||||
))
|
||||
}
|
||||
|
||||
WebPMemory((*writer_ptr).mem, (*writer_ptr).size)
|
||||
Ok(WebPMemory((*writer_ptr).mem, (*writer_ptr).size))
|
||||
}
|
||||
|
||||
/// This struct represents a safe wrapper around memory owned by libwebp.
|
||||
|
@ -329,7 +303,7 @@ mod tests {
|
|||
use super::*;
|
||||
|
||||
fn ensure_global() {
|
||||
init_global(true, 50.0, 6, true)
|
||||
let _ = config(true, 50.0, 6, true); // config() now returns a WebPConfig instead of setting a global
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
|