Initial Commit

kossLAN 2024-10-23 04:44:29 -04:00
parent 5b45525331
commit 7f8d940ef5
Signed by: kossLAN
SSH key fingerprint: SHA256:bdV0x+wdQHGJ6LgmstH3KV8OpWY+OOFmJcPcB0wQPV8
12 changed files with 2901 additions and 2 deletions

3 .gitignore vendored Normal file

@@ -0,0 +1,3 @@
/target
tinyupload.db*

2301 Cargo.lock generated Normal file

File diff suppressed because it is too large.

18 Cargo.toml Normal file

@@ -0,0 +1,18 @@
[package]
name = "tinyupload"
version = "0.2.0"
edition = "2021"

[dependencies]
axum = { version = "0.7.7", features = ["multipart"] }
serde = { version = "1.0", features = ["derive"] }
tokio = { version = "1.0", features = ["full"] }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
sqlx = { version = "0.8.2", features = [ "runtime-tokio", "sqlite" ] }
rand = "0.8.5"
sha2 = "0.10.8"
base64 = "0.22.1"
clap = { version = "4.5.20", features = ["derive"] }
chrono = "0.4.38"
async-stream = "0.3.6"
bytes = "1.8.0"

README.md

@@ -1,3 +1,18 @@
# tinyupload
A server program that allows uploads. /shrug
# What is tinyupload?
Essentially, tinyupload is a personal CDN that you can host yourself. Why? Mostly for quickly sharing content with friends/others without being limited by whatever social chat platform you are on; the main target here is Discord.
## Motivation
I made this to solve a problem I have with Discord: with a Vencord plugin, I'll eventually be able to upload directly to my own tinyupload instance and get past Discord's low 10 MB upload limit without having to pay $10/month to do so.
## Roadmap
- [ ] Server Side Encryption
- [ ] Vencord Plugin
- [ ] Config File

*and probably many others*
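## Usage
Uploading is a single multipart `POST` to `/upload`, authenticated with an `x-api-key` header (make one with the `generate` subcommand); the response body is the hashed path the file is then served from. Here is a minimal client sketch, assuming the `reqwest` (with its `multipart` feature) and `tokio` crates, neither of which tinyupload itself depends on, and a server on `localhost:1337`:

```rust
// Hypothetical client sketch: `reqwest` and `tokio` are assumptions here,
// not dependencies of this repository.
use reqwest::multipart;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The server reads the original filename from the multipart *field* name.
    let part = multipart::Part::bytes(std::fs::read("clip.mp4")?);
    let form = multipart::Form::new().part("clip.mp4", part);

    let response = reqwest::Client::new()
        .post("http://localhost:1337/upload")
        .header("x-api-key", "<key from the generate subcommand>")
        .multipart(form)
        .send()
        .await?;

    // On success the body is "<hash>.<ext>", the path to fetch the file from.
    println!("uploaded: /{}", response.text().await?);
    Ok(())
}
```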
## Acknowledgements
This was really my first fully-fledged Rust project, and it was made a lot easier with the help of [outfoxxed](https://github.com/outfoxxed); check out some of the stuff he does.

26 flake.lock generated Normal file

@@ -0,0 +1,26 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1729453369,
        "narHash": "sha256-UDysmG2kJWozN0JaZHb/hRW5GL+TK5gM4RaCHMkkrVY=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "4deb3c835f2eb2e11be784d458bbdcf4affe412b",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}

33 flake.nix Normal file

@@ -0,0 +1,33 @@
{
  description = "A simple rust project";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs";
  };

  outputs = {
    self,
    nixpkgs,
  }: let
    forEachSystem = fn:
      nixpkgs.lib.genAttrs
      ["x86_64-linux" "aarch64-linux"]
      (system: fn system nixpkgs.legacyPackages.${system});
  in {
    # Define a package for your project
    packages = forEachSystem (system: pkgs: rec {
      tinyupload = pkgs.callPackage ./nix/package.nix {};
      default = tinyupload;
    });

    devShells = forEachSystem (system: pkgs: {
      default = pkgs.mkShell {
        DATABASE_URL = "sqlite:tinyupload.db";
        buildInputs = with pkgs; [cargo rustc sqlx-cli sqlite];
      };
    });
  };
}

@@ -0,0 +1,5 @@
-- Add migration script here
CREATE TABLE IF NOT EXISTS keys (
    id INTEGER PRIMARY KEY,
    -- Keys are 64-char SHA-256 hex strings (SQLite does not enforce the length).
    key VARCHAR(64)
);

@@ -0,0 +1,9 @@
-- Add migration script here
CREATE TABLE IF NOT EXISTS files (
    id INTEGER PRIMARY KEY,
    filename VARCHAR(64) NOT NULL,
    -- Long enough for "application/octet-stream"; SQLite ignores the length anyway.
    mime_type VARCHAR(32) NOT NULL,
    hash VARCHAR(64) NOT NULL,
    last_modified DATE,
    etag VARCHAR(32)
);

26 nix/package.nix Normal file

@@ -0,0 +1,26 @@
{
  rustPlatform,
  ...
}:
rustPlatform.buildRustPackage {
  pname = "tinyupload";
  # Keep in sync with Cargo.toml.
  version = "0.2.0";
  src = ../.;

  # Build against the checked-in lockfile so the build works inside the
  # offline Nix sandbox (a plain `cargo build` in mkDerivation cannot
  # fetch crates from the network there).
  cargoLock.lockFile = ../Cargo.lock;
}

126 src/database.rs Normal file

@@ -0,0 +1,126 @@
use base64::{engine::general_purpose, Engine as _};
use rand::Rng;
use sha2::{Digest, Sha256};
use sqlx::migrate::{MigrateDatabase, Migrator};
use sqlx::sqlite::SqlitePool;

#[derive(sqlx::FromRow)]
struct KeyRow {
    key: String,
}

#[derive(sqlx::FromRow)]
pub struct FileRow {
    pub mime_type: String,
    pub last_modified: String,
    pub etag: String,
}

static MIGRATOR: Migrator = sqlx::migrate!("./migrations");

pub async fn init(database_url: String) -> Result<SqlitePool, sqlx::Error> {
    // If the database doesn't exist, create it and run the migrations with a
    // temporary pool, then reconnect for normal use.
    if !sqlx::Sqlite::database_exists(&database_url).await? {
        sqlx::Sqlite::create_database(&database_url).await?;

        let pool = SqlitePool::connect(&database_url).await?;
        MIGRATOR.run(&pool).await?;
        pool.close().await;
    }

    let pool = SqlitePool::connect(&database_url).await?;
    Ok(pool)
}

pub async fn check_key(pool: &SqlitePool, key: String) -> Result<bool, sqlx::Error> {
    // Let SQLite do the lookup instead of fetching every key into memory.
    let row = sqlx::query_as::<_, KeyRow>(r#"SELECT * FROM keys WHERE key = ?"#)
        .bind(&key)
        .fetch_optional(pool)
        .await?;

    Ok(row.map_or(false, |r| r.key == key))
}

pub async fn add_key(pool: &SqlitePool) -> Result<String, sqlx::Error> {
    let key = generate_api_key();

    sqlx::query("INSERT INTO keys(key) VALUES (?)")
        .bind(&key)
        .execute(pool)
        .await?;

    Ok(key)
}

fn generate_api_key() -> String {
    // 32 random bytes -> base64 -> SHA-256, rendered as 64 lowercase hex chars.
    let mut random_bytes = [0u8; 32];
    let mut rng = rand::thread_rng();
    rng.fill(&mut random_bytes);

    let encoded_bytes = general_purpose::STANDARD.encode(random_bytes);

    let mut hasher = Sha256::new();
    hasher.update(encoded_bytes);
    let result = hasher.finalize();

    format!("{:x}", result)
}
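
// Illustrative sanity check (a sketch added for this write-up, not part of
// the original commit): since the key is the hex-encoded SHA-256 of 32
// random bytes, it is always exactly 64 hex characters.
#[cfg(test)]
mod key_tests {
    use super::generate_api_key;

    #[test]
    fn generated_keys_are_64_hex_chars() {
        let key = generate_api_key();
        assert_eq!(key.len(), 64);
        assert!(key.chars().all(|c| c.is_ascii_hexdigit()));
    }
}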
pub async fn add_file(
    pool: &SqlitePool,
    filename: &str,
    mime_type: &str,
    hash: &str,
    last_modified: String,
    etag: String,
) -> Result<(), sqlx::Error> {
    sqlx::query(
        "
        INSERT INTO files(filename, mime_type, hash, last_modified, etag)
        VALUES (?, ?, ?, ?, ?)
        ",
    )
    .bind(filename)
    .bind(mime_type)
    .bind(hash)
    .bind(last_modified)
    .bind(etag)
    .execute(pool)
    .await?;

    Ok(())
}

pub async fn check_file_exists(pool: &SqlitePool, hash: &str) -> Result<bool, sqlx::Error> {
    let row = sqlx::query_as::<_, FileRow>(
        r#"
        SELECT * FROM files
        WHERE hash = ?
        "#,
    )
    .bind(hash)
    .fetch_optional(pool)
    .await?;

    Ok(row.is_some())
}

pub async fn get_entry(pool: &SqlitePool, hash: String) -> Result<FileRow, sqlx::Error> {
    let row = sqlx::query_as::<_, FileRow>(
        r#"
        SELECT * FROM files
        WHERE hash = ?
        "#,
    )
    .bind(hash)
    .fetch_one(pool)
    .await?;

    Ok(row)
}

55 src/main.rs Normal file

@@ -0,0 +1,55 @@
use clap::{Parser, Subcommand};

mod database;
mod server;

#[derive(Parser)]
#[command(version, about, long_about = None)]
#[command(propagate_version = true)]
struct Cli {
    /// The location of the sqlite database.
    #[arg(short, long, default_value = "sqlite:tinyupload.db")]
    database_url: String,

    /// Path to the directory where the files will be stored.
    #[arg(short, long, default_value = "files")]
    files_path: String,

    /// The address to listen on.
    #[arg(short, long, default_value = "0.0.0.0:1337")]
    address: String,

    /// The upload limit in MiB.
    #[arg(short, long, default_value_t = 100)]
    upload_size: usize,

    #[command(subcommand)]
    command: Commands,
}

#[derive(Subcommand)]
enum Commands {
    /// Runs the web server.
    Serve {},
    /// Generates a new API key.
    Generate {},
}

#[tokio::main]
async fn main() {
    let cli = Cli::parse();

    match &cli.command {
        Commands::Serve {} => {
            println!("Starting server on {}", cli.address);
            let db = database::init(cli.database_url).await.unwrap();
            server::init(db, cli.files_path, cli.upload_size, cli.address).await;
        }
        Commands::Generate {} => {
            let db = database::init(cli.database_url).await.unwrap();
            let key = database::add_key(&db).await.expect("Failed to add key");
            println!("Copy this generated key: {}", key);
        }
    }
}

282 src/server.rs Normal file

@@ -0,0 +1,282 @@
use crate::database;
use axum::{
    body::Body,
    extract::{DefaultBodyLimit, Multipart, Path, State},
    http::{
        header::{self, HeaderMap},
        StatusCode,
    },
    response::{IntoResponse, Response},
    routing::{get, post},
    Router,
};
use chrono::Utc;
use serde::Deserialize;
use sqlx::SqlitePool;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio::io::{AsyncReadExt, AsyncSeekExt};

#[derive(Clone)]
struct AppState {
    db: SqlitePool,
    files_path: String,
}

#[derive(Deserialize)]
struct Media {
    urlpath: String,
}

pub async fn init(db: SqlitePool, files_path: String, upload_size: usize, address: String) {
    // Initialize the state
    let state = AppState { db, files_path };

    // Initialize tracing
    tracing_subscriber::fmt::init();

    // Build our application with a route
    let app = Router::new()
        .route("/:urlpath", get(stream))
        .route("/upload", post(upload))
        .with_state(state)
        .layer(DefaultBodyLimit::max(1024 * 1024 * upload_size));

    let listener = tokio::net::TcpListener::bind(address).await.unwrap();
    axum::serve(listener, app).await.unwrap();
}
async fn stream(
    State(state): State<AppState>,
    Path(Media { urlpath }): Path<Media>,
    headers: HeaderMap,
) -> impl IntoResponse {
    // Only the hash part of "<hash>.<ext>" identifies the file; the
    // extension is cosmetic and ignored here.
    let (hash, _ext) = parse_filename(&urlpath);

    if database::check_file_exists(&state.db, hash).await.unwrap() {
        let full_path = format!("{}/{}", state.files_path, hash);
        let file_entry = database::get_entry(&state.db, hash.to_string())
            .await
            .unwrap();

        println!("Serving file: ({})", full_path);

        let file = match File::open(&full_path).await {
            Ok(file) => file,
            Err(e) => {
                println!("Error opening file: {:?}", e);
                return Response::builder()
                    .status(StatusCode::NOT_FOUND)
                    .body(Body::empty())
                    .unwrap();
            }
        };

        let file_length = file.metadata().await.unwrap().len();
        let range_header = headers
            .get(header::RANGE)
            .and_then(|header| header.to_str().ok());

        // An absent or unparseable range falls back to the whole file.
        let (start, end) = range_header
            .and_then(|range| parse_range_header(range, file_length))
            .unwrap_or((0, file_length - 1));

        let content_length = end - start + 1;
        let content_range = format!("bytes {}-{}/{}", start, end, file_length);

        let stream = async_stream::stream! {
            const CHUNK_SIZE: usize = 64 * 1024; // 64 KiB chunks

            let mut file = file;
            let mut position = start;
            let mut buffer = vec![0; CHUNK_SIZE];

            file.seek(std::io::SeekFrom::Start(position)).await.unwrap();

            while position <= end {
                let remaining = (end - position + 1) as usize;
                let chunk_size = CHUNK_SIZE.min(remaining);
                let buf = &mut buffer[..chunk_size];

                match file.read_exact(buf).await {
                    Ok(_) => {
                        yield Ok(bytes::Bytes::copy_from_slice(buf));
                        position += chunk_size as u64;
                    }
                    Err(e) => {
                        yield Err(std::io::Error::new(
                            std::io::ErrorKind::Other,
                            format!("Failed to read file: {}", e),
                        ));
                        break;
                    }
                }
            }
        };

        // Only answer 206 with a Content-Range when the client actually
        // asked for a range; plain requests get a regular 200.
        let mut response = Response::builder()
            .status(if range_header.is_some() {
                StatusCode::PARTIAL_CONTENT
            } else {
                StatusCode::OK
            })
            .header(header::CONTENT_TYPE, file_entry.mime_type)
            .header(header::ACCEPT_RANGES, "bytes")
            .header(header::CONTENT_LENGTH, content_length)
            .header(header::CACHE_CONTROL, "public, max-age=31536000")
            .header(header::LAST_MODIFIED, file_entry.last_modified)
            .header(header::ETAG, file_entry.etag);

        if range_header.is_some() {
            response = response.header(header::CONTENT_RANGE, content_range);
        }

        response.body(Body::from_stream(stream)).unwrap()
    } else {
        println!("Request for a file not found with hash: ({})", hash);
        Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body(Body::empty())
            .unwrap()
    }
}
fn parse_range_header(h: &str, len: u64) -> Option<(u64, u64)> {
    h.strip_prefix("bytes=")?
        .split_once('-')
        .and_then(|(s, e)| {
            let start: u64 = s.parse().ok()?;
            // Reject out-of-bounds starts before doing any arithmetic, so
            // `len - start - 1` below cannot underflow.
            if start >= len {
                return None;
            }
            // An open-ended range ("bytes=N-") is capped at ~1 MiB per response.
            let end = e
                .parse()
                .unwrap_or_else(|_| start + (1u64 << 20).min(len - start - 1));
            (end < len && start <= end).then_some((start, end))
        })
}
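
// Illustrative examples (a sketch added for this write-up, not part of the
// original commit): explicit ranges are honored, open-ended ranges are
// capped at ~1 MiB, and out-of-bounds ranges are rejected.
#[cfg(test)]
mod range_tests {
    use super::parse_range_header;

    #[test]
    fn parses_and_bounds_ranges() {
        assert_eq!(parse_range_header("bytes=0-1023", 4096), Some((0, 1023)));
        // Open-ended: end falls back to start + min(1 MiB, len - start - 1).
        assert_eq!(parse_range_header("bytes=100-", 4096), Some((100, 4095)));
        assert_eq!(parse_range_header("bytes=5000-6000", 4096), None);
        assert_eq!(parse_range_header("nonsense", 4096), None);
    }
}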
// Everything below is for uploading functionality
async fn upload(
    State(state): State<AppState>,
    headers: HeaderMap,
    mut multipart: Multipart,
) -> Response {
    // Create a response builder and add CORS headers
    let mut response_builder = Response::builder();
    response_builder = response_builder.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*");

    // Key validation; a missing or non-UTF-8 header is treated as no key.
    let key = headers
        .get("x-api-key")
        .and_then(|value| value.to_str().ok())
        .unwrap_or("")
        .to_string();

    if !database::check_key(&state.db, key).await.unwrap_or(false) {
        println!("Attempt to upload without a valid key.");
        return response_builder
            .status(StatusCode::FORBIDDEN)
            .body(axum::body::Body::from("Access Denied."))
            .unwrap();
    }

    // Process the multipart form data; the field name carries the filename,
    // falling back to "unnamed" rather than panicking on a nameless field.
    while let Some(field) = multipart.next_field().await.unwrap_or(None) {
        let (name, ext) = parse_filename(field.name().unwrap_or("unnamed"));
        let (name, ext) = (name.to_string(), ext.unwrap_or("").to_string());

        let data = match field.bytes().await {
            Ok(data) => data,
            Err(e) => {
                println!(
                    "Error reading field data: {:?} (chances are the upload size limit is too small)",
                    e
                );
                continue;
            }
        };

        // Hash the file name; the hash becomes the on-disk name and URL path.
        let mut s = DefaultHasher::new();
        name.hash(&mut s);
        let hash = s.finish();
        let full_path = format!("{}/{}", state.files_path, hash);

        // Write the file data to disk :P
        let mut file = match File::create(&full_path).await {
            Ok(file) => file,
            Err(e) => {
                println!("Error creating file at {}: {:?}", full_path, e);
                continue;
            }
        };

        match file.write_all(&data).await {
            Ok(_) => println!("New file uploaded to {}", full_path),
            Err(e) => {
                println!("Error writing to file: {:?}", e);
                continue;
            }
        };

        let content_type = match ext.as_str() {
            "jpg" | "jpeg" => "image/jpeg",
            "png" => "image/png",
            "gif" => "image/gif",
            "json" => "application/json",
            "txt" => "text/plain",
            "mp4" => "video/mp4",
            "webm" => "video/webm",
            _ => "application/octet-stream",
        };

        database::add_file(
            &state.db,
            &name,
            content_type,
            &hash.to_string(),
            Utc::now().to_string(),
            generate_etag(&full_path),
        )
        .await
        .unwrap();

        // The file name doesn't matter here, we just spoof it; only the
        // hash is used for lookups.
        let rel_path = format!("{}.{}", hash, ext);
        return response_builder
            .status(StatusCode::OK)
            .body(axum::body::Body::from(rel_path))
            .unwrap();
    }

    response_builder
        .status(StatusCode::NO_CONTENT)
        .body(axum::body::Body::from("No files uploaded."))
        .unwrap()
}
fn generate_etag(path: &str) -> String {
    let modified_secs = Utc::now().timestamp();
    let mut hasher = DefaultHasher::new();

    path.hash(&mut hasher);
    modified_secs.hash(&mut hasher);

    format!("\"{:x}\"", hasher.finish())
}

fn parse_filename(filename: &str) -> (&str, Option<&str>) {
    match filename.rsplit_once('.') {
        Some((name, ext)) => (name, Some(ext)),
        None => (filename, None),
    }
}
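
// Illustrative examples (a sketch added for this write-up, not part of the
// original commit): the extension is split off at the *last* dot.
#[cfg(test)]
mod filename_tests {
    use super::parse_filename;

    #[test]
    fn splits_on_last_dot() {
        assert_eq!(parse_filename("video.mp4"), ("video", Some("mp4")));
        assert_eq!(parse_filename("archive.tar.gz"), ("archive.tar", Some("gz")));
        assert_eq!(parse_filename("noext"), ("noext", None));
    }
}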