Browse Source

rustfmt

pull/1/head
Stephen 5 months ago
parent
commit
deeac229fa
2 changed files with 150 additions and 132 deletions
  1. +2
    -0
      rustfmt.toml
  2. +148
    -132
      src/main.rs

+ 2
- 0
rustfmt.toml View File

@ -0,0 +1,2 @@
tab_spaces = 4
hard_tabs = true

+ 148
- 132
src/main.rs View File

@ -1,162 +1,178 @@
use std::collections::HashMap;
use rand::seq::SliceRandom;
use rand::thread_rng;
use serde::Deserialize;
use std::{fs, cmp};
use std::collections::HashMap;
use std::sync::{Arc, Mutex, RwLock};
use warp::{http, Filter, Reply};
use std::time::{Duration, Instant};
use std::{cmp, fs};
use tokio::time;
use std::time::{Instant, Duration};
use rand::seq::SliceRandom;
use rand::thread_rng;
use warp::{http, Filter, Reply};
/// How long (in minutes) a blacklisted proxy stays out of rotation before it
/// becomes eligible again (its next-available time is pushed this far ahead).
const BLACKLIST_DELAY_TIME_MINS: u64 = 15;
/// Sentinel "far future" duration used as the initial upper bound when
/// scanning for the soonest-available proxy in `get_proxy_fn`.
const ONE_YEAR: Duration = Duration::from_secs(60 * 60 * 24 * 365);
#[derive(Deserialize, Debug)]
struct WebsiteConfig { // May add more here in the future
delay: u32 // seconds
struct WebsiteConfig {
// May add more here in the future
delay: u32, // seconds
}
#[derive(Deserialize, Debug)]
struct Config {
proxies: Vec<String>,
websites: HashMap<String, WebsiteConfig>
proxies: Vec<String>,
websites: HashMap<String, WebsiteConfig>,
}
/// Runtime state for one website: each proxy address mapped to the `Instant`
/// at which it next becomes available.
#[derive(Clone)]
struct Website {
	proxies: HashMap<String, Instant>,
	index: usize,
	delay: u32, // seconds, copied from WebsiteConfig::delay
}
#[derive(Clone)]
struct ServerState {
websites: HashMap<String, Arc<Mutex<Website>>>,
proxies: Vec<String>
websites: HashMap<String, Arc<Mutex<Website>>>,
proxies: Vec<String>,
}
/// Loads the TOML config, seeds each website's proxy table, and serves the
/// two HTTP endpoints (GET /v1/<site>, POST /v1/<site>/<proxy>) on port 3030.
#[tokio::main]
async fn main() {
	let mut websites: HashMap<String, Arc<Mutex<Website>>> = HashMap::new();
	// TODO don't hard-code the file name
	let config: Config =
		toml::from_str(&fs::read_to_string("config.example.toml").unwrap()).unwrap();
	// Populate the proxies
	for (site, site_conf) in &config.websites {
		let mut hsh: HashMap<String, Instant> = HashMap::new();
		for proxy in &config.proxies {
			// Q: Why are we adding the delay time here?
			// A: So that if the server restarts, we don't immediately start handing out proxies
			// which were just used a second ago (causing the rate limit to be exceeded)
			hsh.insert(
				proxy.clone(),
				Instant::now() + Duration::from_secs(site_conf.delay.into()),
			);
		}
		websites.insert(
			site.clone(),
			Arc::new(Mutex::new(Website {
				proxies: hsh,
				index: 0,
				// Same value as config.websites[site].delay, without the extra map lookup.
				delay: site_conf.delay,
			})),
		);
	}
	let state = Arc::new(RwLock::new(ServerState {
		websites,
		proxies: config.proxies,
	}));
	// GET /v1/<website>: hand out the soonest-available proxy for that site.
	let get_proxy = {
		let state = state.clone();
		warp::get()
			.and(warp::path!("v1" / String))
			.and(warp::any().map(move || state.clone()))
			.and_then(get_proxy_fn)
	};
	// POST /v1/<website>/<proxy>: take a misbehaving proxy out of rotation.
	let blacklist_proxy = warp::post()
		.and(warp::path!("v1" / String / String))
		.and(warp::any().map(move || state.clone()))
		.and_then(blacklist_proxy_fn);
	warp::serve(get_proxy.or(blacklist_proxy))
		.run(([0, 0, 0, 0], 3030))
		.await;
}
async fn get_proxy_fn(website_str: String, state: Arc<RwLock<ServerState>>)
-> Result<impl warp::Reply, warp::Rejection> {
let (proxy_address, proxy_time) = {
let state = state.read().unwrap();
match state.websites.get(&website_str) {
Some(x) => {
let mut website = x.lock().unwrap();
// TODO Is there a better way to do this?
let mut proxies: Vec<(String, Instant)> = website.proxies.iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect();
proxies.shuffle(&mut thread_rng());
let mut min_proxy_address: String = "".to_string();
let mut min_time = Instant::now() + ONE_YEAR;
for (proxy_address, proxy_time) in &proxies {
if proxy_time < &min_time {
min_time = *proxy_time;
min_proxy_address = proxy_address.to_string();
if min_time < Instant::now() {
// We've found a proxy with no delay. Might as well use it
break;
}
}
};
let dur = Duration::from_secs(website.delay.into());
*website.proxies.get_mut(&min_proxy_address).unwrap() =
cmp::max(min_time, Instant::now()) + dur;
(min_proxy_address, min_time)
}
None => {
return Ok(
warp::reply::with_status("Website not found",
http::status::StatusCode::NOT_FOUND)
.into_response()
)
}
}
};
time::delay_until(proxy_time.into()).await; // Wait until proxy is ready
Ok(warp::reply::json(&proxy_address).into_response())
async fn get_proxy_fn(
website_str: String,
state: Arc<RwLock<ServerState>>,
) -> Result<impl warp::Reply, warp::Rejection> {
let (proxy_address, proxy_time) = {
let state = state.read().unwrap();
match state.websites.get(&website_str) {
Some(x) => {
let mut website = x.lock().unwrap();
// TODO Is there a better way to do this?
let mut proxies: Vec<(String, Instant)> = website
.proxies
.iter()
.map(|(k, v)| (k.clone(), *v))
.collect();
proxies.shuffle(&mut thread_rng());
let mut min_proxy_address: String = "".to_string();
let mut min_time = Instant::now() + ONE_YEAR;
for (proxy_address, proxy_time) in &proxies {
if proxy_time < &min_time {
min_time = *proxy_time;
min_proxy_address = proxy_address.to_string();
if min_time < Instant::now() {
// We've found a proxy with no delay. Might as well use it
break;
}
}
}
let dur = Duration::from_secs(website.delay.into());
*website.proxies.get_mut(&min_proxy_address).unwrap() =
cmp::max(min_time, Instant::now()) + dur;
(min_proxy_address, min_time)
}
None => {
return Ok(warp::reply::with_status(
"Website not found",
http::status::StatusCode::NOT_FOUND,
)
.into_response())
}
}
};
time::delay_until(proxy_time.into()).await; // Wait until proxy is ready
Ok(warp::reply::json(&proxy_address).into_response())
}
/// POST /v1/<website>/<proxy> handler.
///
/// Marks a proxy as blacklisted (not working, blocked by website, etc.) by
/// pushing its next-available time `BLACKLIST_DELAY_TIME_MINS` minutes into
/// the future — so it auto-un-blacklists after 15 minutes. Returns 404 when
/// the website or proxy is unknown.
async fn blacklist_proxy_fn(
	website_str: String,
	proxy: String,
	state: Arc<RwLock<ServerState>>,
) -> Result<impl warp::Reply, warp::Rejection> {
	let state = state.read().unwrap();
	match state.websites.get(&website_str) {
		Some(x) => match x.lock().unwrap().proxies.get_mut(&proxy) {
			Some(y) => {
				// Treat the proxy as "in use" until the blacklist window expires.
				*y = Instant::now() + Duration::from_secs(60 * BLACKLIST_DELAY_TIME_MINS);
				Ok(
					warp::reply::with_status("Success", http::status::StatusCode::OK)
						.into_response(),
				)
			}
			None => Ok(warp::reply::with_status(
				"Proxy not found",
				http::status::StatusCode::NOT_FOUND,
			)
			.into_response()),
		},
		None => Ok(warp::reply::with_status(
			"Website not found",
			http::status::StatusCode::NOT_FOUND,
		)
		.into_response()),
	}
}

Loading…
Cancel
Save