diff --git a/Cargo.toml b/Cargo.toml
index ea0bcb7..4e6d0de 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -28,6 +28,7 @@ chrono = { version = "0.4.37" }
 clap = { version = "4.5.3", features = ["derive"], optional = true }
 colored = { version = "2.1.0", optional = false }
 libpt = "0.4.2"
+num_cpus = "1.16.0"
 rand = "0.8.5"
 rayon = "1.10.0"
 regex = "1.10.3"
diff --git a/src/bench/builtin.rs b/src/bench/builtin.rs
index 37cb097..ec88527 100644
--- a/src/bench/builtin.rs
+++ b/src/bench/builtin.rs
@@ -1,14 +1,20 @@
+use std::sync::{Arc, Mutex};
+
+use libpt::log::info;
+
+use crate::error::{BenchError, Error, WResult};
 use crate::game::{Game, GameBuilder};
 use crate::solve::Solver;
 use crate::wlist::WordList;
 
-use super::Benchmark;
+use super::{Benchmark, Report};
 
 #[derive(Debug, Clone)]
 pub struct BuiltinBenchmark<'wl, WL: WordList, SL: Solver<'wl, WL>> {
     wordlist: &'wl WL,
     solver: SL,
     builder: GameBuilder<'wl, WL>,
+    report: Arc<Mutex<Report>>,
 }
 
 impl<'wl, WL, SL> Benchmark<'wl, WL, SL> for BuiltinBenchmark<'wl, WL, SL>
@@ -22,10 +28,14 @@ where
         wordlist: &'wl WL,
         solver: SL,
         builder: GameBuilder<'wl, WL>,
+        threads: usize
     ) -> crate::error::WResult<Self> {
+        info!("using {threads} threads for benchmarking");
+        rayon::ThreadPoolBuilder::new().num_threads(threads).build_global().unwrap();
         Ok(Self {
             wordlist,
             solver,
+            report: Arc::new(Mutex::new(Report::new(builder.build()?))),
             builder,
         })
     }
@@ -35,4 +45,12 @@ where
     fn builder(&'wl self) -> &'wl crate::game::GameBuilder<'wl, WL> {
         &self.builder
     }
+
+    fn report_mutex(&'wl self) -> Arc<Mutex<Report>> {
+        self.report.clone()
+    }
+
+    fn report(&'wl self) -> super::Report {
+        self.report.lock().expect("lock is poisoned").clone()
+    }
 }
diff --git a/src/bench/mod.rs b/src/bench/mod.rs
index dcc3bd3..ae0234e 100644
--- a/src/bench/mod.rs
+++ b/src/bench/mod.rs
@@ -30,6 +30,7 @@ where
         wordlist: &'wl WL,
         solver: SL,
         builder: GameBuilder<'wl, WL>,
+        threads: usize
     ) -> crate::error::WResult<Self>;
     fn builder(&'wl self) -> &'wl GameBuilder<'wl, WL>;
     fn make_game(&'wl self) -> WResult<Game<'wl, WL>> {
@@ -42,11 +43,7 @@ where
     // TODO: add some interface to get reports while the benchmark runs
     // TODO: make the benchmark optionally multithreaded
     fn bench(&'wl self, n: usize) -> WResult<Report> {
-        let part = match n / 20 {
-            0 => 19,
-            other => other,
-        };
-        let report = Arc::new(Mutex::new(Report::new(self.make_game()?)));
+        let report = self.report_mutex();
         let this = std::sync::Arc::new(self);
 
         (0..n)
@@ -59,11 +56,12 @@ where
                 report.lock().expect("lock is poisoned").add(r);
             });
 
-        // FIXME: find some way to take the Report from the Mutex
-        // Mutex::into_inner() does not work
-        let mut report: Report = report.lock().unwrap().clone();
-        report.finalize();
+        report.lock().expect("lock is poisoned").finalize();
+        drop(report);
 
-        Ok(report)
+        Ok(self.report())
     }
+    // PERF: Somehow returning &Report would be better as we don't need to clone then
+    fn report(&'wl self) -> Report;
+    fn report_mutex(&'wl self) -> Arc<Mutex<Report>>;
 }
diff --git a/src/bench/report.rs b/src/bench/report.rs
index 5b6a352..ad3ad31 100644
--- a/src/bench/report.rs
+++ b/src/bench/report.rs
@@ -3,6 +3,7 @@ use core::panic;
 use libpt::log::debug;
 use std::fmt::Display;
 use std::ops::Div;
+use rayon::prelude::*;
 
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
@@ -69,9 +70,11 @@ impl Report {
         self.total_steps() as f64 / self.n() as f64
     }
 
-    pub fn avg_time(&self) -> Option<TimeDelta> {
-        let av = self.benchtime()? / self.n() as i32;
-        Some(av)
+    pub fn avg_time(&self) -> TimeDelta {
+        if self.n() == 0 {
+            return TimeDelta::new(0, 0).unwrap();
+        }
+        self.benchtime() / self.n() as i32
     }
 
     fn rating_steps(&self) -> f64 {
@@ -83,20 +86,25 @@ impl Report {
         WEIGHTING_WIN * (1.0 - self.avg_win())
     }
 
-    fn rating_time(&self) -> Option<f64> {
-        let n = 1.0 / (1.0 + (self.avg_time()?.num_nanoseconds()? as f64).exp());
-        Some(WEIGHTING_TIME * (1.0 - n))
+    fn rating_time(&self) -> f64 {
+        let n = 1.0
+            / (1.0
+                + (self
+                    .avg_time()
+                    .num_nanoseconds()
+                    .expect("nanoseconds overflow") as f64)
+                    .exp());
+        WEIGHTING_TIME * (1.0 - n)
     }
 
-    pub fn rating(&self) -> Option<f64> {
+    pub fn rating(&self) -> f64 {
         let rating_steps: f64 = self.rating_steps();
         let rating_win: f64 = self.rating_win();
-        let rating_time: f64 = self.rating_time()?;
+        let rating_time: f64 = self.rating_time();
         debug!("partial rating - steps: {}", rating_steps);
         debug!("partial rating - win: {}", rating_win);
         debug!("partial rating - time: {:?}", rating_time);
-        let r = rating_win + rating_time + rating_steps;
-        Some(r)
+        rating_win + rating_time + rating_steps
     }
 
     /// finalize the record
@@ -116,8 +124,8 @@ impl Report {
         self.finished
     }
 
-    pub fn benchtime(&self) -> Option<TimeDelta> {
-        self.benchtime
+    pub fn benchtime(&self) -> TimeDelta {
+        chrono::Local::now().naive_local() - self.start
     }
 
     pub fn max_steps(&self) -> usize {
@@ -132,9 +140,6 @@ impl Display for Report {
     ///
     /// This will panic if the [Report] is not finished
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        if !self.finished {
-            panic!("can only display finished reports");
-        }
         write!(
             f,
             "n: {}, win_ratio: {:.2}%, avg_score: {:.4} steps until finish, avgerage time per game: {}μs, \n\
@@ -143,9 +148,9 @@ impl Display for Report {
             self.n(),
             self.avg_win() * 100.0,
             self.avg_steps(),
-            self.avg_time().unwrap().num_microseconds().expect("overflow when converting to micrseconds"),
-            self.rating().unwrap(),
-            self.benchtime().unwrap().num_milliseconds()
+            self.avg_time(),
+            self.rating(),
+            self.benchtime()
         )
     }
 }
diff --git a/src/bin/bench/cli.rs b/src/bin/bench/cli.rs
index 190b316..e2772cc 100644
--- a/src/bin/bench/cli.rs
+++ b/src/bin/bench/cli.rs
@@ -2,11 +2,15 @@
 // #![warn(missing_docs)]
 #![warn(missing_debug_implementations)]
 
+use std::sync::Arc;
+
 use clap::Parser;
 use libpt::log::*;
 
 use wordle_analyzer::bench::builtin::BuiltinBenchmark;
+use wordle_analyzer::bench::report::Report;
 use wordle_analyzer::bench::{Benchmark, DEFAULT_N};
+use wordle_analyzer::error::WResult;
 use wordle_analyzer::solve::{BuiltinSolverNames, Solver};
 use wordle_analyzer::wlist::builtin::BuiltinWList;
 
@@ -33,6 +37,11 @@ struct Cli {
     /// how many games to play for the benchmark
     #[arg(short, long, default_value_t = DEFAULT_N)]
     n: usize,
+    /// how many threads to use for benchmarking
+    ///
+    /// Note that the application as the whole will use at least one more thread.
+    #[arg(short, long, default_value_t = num_cpus::get())]
+    threads: usize,
 }
 
 fn main() -> anyhow::Result<()> {
@@ -50,11 +59,19 @@ fn main() -> anyhow::Result<()> {
         .max_steps(cli.max_steps)
         .precompute(cli.precompute);
     let solver = cli.solver.to_solver(&wl);
-    let bench = BuiltinBenchmark::build(&wl, solver, builder)?;
+    let bench = Arc::new(BuiltinBenchmark::build(&wl, solver, builder, cli.threads)?);
+    let bench_running = bench.clone();
     trace!("{bench:#?}");
-    let report = bench.bench(cli.n)?;
+    let n = cli.n;
+    let bench_th: std::thread::JoinHandle<WResult<Report>> =
+        std::thread::spawn(move || bench_running.bench(n));
 
-    println!("{report}");
+    while !bench_th.is_finished() {
+        println!("{}", bench.report());
+    }
+
+    // finished report
+    println!("{}", bench_th.join().expect("thread go boom")?);
 
     Ok(())
 }
diff --git a/src/error.rs b/src/error.rs
index 5cb68f9..6145454 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -8,11 +8,16 @@ pub type GameResult<T> = std::result::Result<T, GameError>;
 
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error("GameError")]
+    #[error("Game Error")]
     GameError {
         #[from]
         source: GameError,
     },
+    #[error("Benchmark Error")]
+    BenchError {
+        #[from]
+        source: BenchError,
+    },
     #[error(transparent)]
     Other {
         #[from]
@@ -42,3 +47,9 @@ pub enum GameError {
     #[error("Tried to guess a word that is not in the wordlist ({0})")]
     WordNotInWordlist(Word),
 }
+
+#[derive(Debug, Clone, Error)]
+pub enum BenchError {
+    #[error("Trying to modify a finished report")]
+    ModifyFinishedReport
+}