DownloadSchedule: Abolish accessors in DownloadScheduleConfig

We can just make the fields pub(crate).
Ian Jackson 2022-04-26 15:09:54 +01:00
parent eb35555330
commit cb303cefb3
3 changed files with 19 additions and 42 deletions
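Before the per-file diffs, here is a minimal, self-contained sketch of the pattern this commit applies, using invented names (RetryConfig, attempts, uses_accessor, uses_field) rather than the real arti types: a private field hidden behind a pub(crate) getter is replaced by a pub(crate) field that callers inside the crate read directly.

// Hypothetical sketch only -- not the arti code; names are invented.

mod before {
    // Private field hidden behind a crate-visible accessor.
    pub struct RetryConfig {
        attempts: u32,
    }

    impl RetryConfig {
        pub(crate) fn new(attempts: u32) -> Self {
            Self { attempts }
        }

        /// The kind of accessor this commit abolishes.
        pub(crate) fn attempts(&self) -> u32 {
            self.attempts
        }
    }

    pub(crate) fn uses_accessor(cfg: &RetryConfig) -> u32 {
        cfg.attempts()
    }
}

mod after {
    // The field itself is pub(crate); no accessor is needed inside the crate.
    pub struct RetryConfig {
        pub(crate) attempts: u32,
    }

    pub(crate) fn uses_field(cfg: &RetryConfig) -> u32 {
        cfg.attempts
    }
}

fn main() {
    let b = before::RetryConfig::new(3);
    let a = after::RetryConfig { attempts: 3 };
    assert_eq!(before::uses_accessor(&b), after::uses_field(&a));
}

In the diff below, note that call sites also drop a dereference (*...retry_bootstrap() becomes ...retry_bootstrap), which is consistent with DownloadSchedule being a Copy type that can be read out of the borrowed config by value.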

View File

@@ -115,17 +115,17 @@ pub struct DownloadScheduleConfig {
         field(build = "self.retry_bootstrap.build_retry_bootstrap()?")
     )]
     #[builder_field_attr(serde(default))]
-    retry_bootstrap: DownloadSchedule,
+    pub(crate) retry_bootstrap: DownloadSchedule,
     /// Configuration for how to retry a consensus download.
     #[builder(sub_builder)]
     #[builder_field_attr(serde(default))]
-    retry_consensus: DownloadSchedule,
+    pub(crate) retry_consensus: DownloadSchedule,
     /// Configuration for how to retry an authority cert download.
     #[builder(sub_builder)]
     #[builder_field_attr(serde(default))]
-    retry_certs: DownloadSchedule,
+    pub(crate) retry_certs: DownloadSchedule,
     /// Configuration for how to retry a microdescriptor download.
     #[builder(
@@ -133,7 +133,7 @@ pub struct DownloadScheduleConfig {
         field(build = "self.retry_microdescs.build_retry_microdescs()?")
     )]
     #[builder_field_attr(serde(default))]
-    retry_microdescs: DownloadSchedule,
+    pub(crate) retry_microdescs: DownloadSchedule,
 }
 impl Default for DownloadScheduleConfig {
@@ -288,29 +288,6 @@ pub struct DirMgrExtensions {
     pub filter: crate::filter::FilterConfig,
 }
-impl DownloadScheduleConfig {
-    /// Return configuration for retrying our entire bootstrap
-    /// operation at startup.
-    pub(crate) fn retry_bootstrap(&self) -> &DownloadSchedule {
-        &self.retry_bootstrap
-    }
-    /// Return configuration for retrying a consensus download.
-    pub(crate) fn retry_consensus(&self) -> &DownloadSchedule {
-        &self.retry_consensus
-    }
-    /// Return configuration for retrying an authority certificate download
-    pub(crate) fn retry_certs(&self) -> &DownloadSchedule {
-        &self.retry_certs
-    }
-    /// Return configuration for retrying an authority certificate download
-    pub(crate) fn retry_microdescs(&self) -> &DownloadSchedule {
-        &self.retry_microdescs
-    }
-}
 #[cfg(test)]
 mod test {
     #![allow(clippy::unwrap_used)]
@@ -380,9 +357,9 @@ mod test {
         let mut bld = DownloadScheduleConfig::builder();
         let cfg = bld.build().unwrap();
-        assert_eq!(cfg.retry_microdescs().parallelism(), 4);
-        assert_eq!(cfg.retry_microdescs().n_attempts(), 3);
-        assert_eq!(cfg.retry_bootstrap().n_attempts(), 128);
+        assert_eq!(cfg.retry_microdescs.parallelism(), 4);
+        assert_eq!(cfg.retry_microdescs.n_attempts(), 3);
+        assert_eq!(cfg.retry_bootstrap.n_attempts(), 128);
         bld.retry_consensus().attempts(7);
         bld.retry_consensus().initial_delay(Duration::new(86400, 0));
@@ -399,11 +376,11 @@ mod test {
         bld.retry_microdescs().parallelism(1);
         let cfg = bld.build().unwrap();
-        assert_eq!(cfg.retry_microdescs().parallelism(), 1);
-        assert_eq!(cfg.retry_microdescs().n_attempts(), 6);
-        assert_eq!(cfg.retry_bootstrap().n_attempts(), 4);
-        assert_eq!(cfg.retry_consensus().n_attempts(), 7);
-        assert_eq!(cfg.retry_certs().n_attempts(), 5);
+        assert_eq!(cfg.retry_microdescs.parallelism(), 1);
+        assert_eq!(cfg.retry_microdescs.n_attempts(), 6);
+        assert_eq!(cfg.retry_bootstrap.n_attempts(), 4);
+        assert_eq!(cfg.retry_consensus.n_attempts(), 7);
+        assert_eq!(cfg.retry_certs.n_attempts(), 5);
         Ok(())
     }

View File

@@ -534,7 +534,7 @@ impl<R: Runtime> DirMgr<R> {
             // TODO(nickm): instead of getting this every time we loop, it
            // might be a good idea to refresh it with each attempt, at
            // least at the point of checking the number of attempts.
-            *dirmgr.config.get().schedule().retry_bootstrap()
+            dirmgr.config.get().schedule().retry_bootstrap
         };
         let mut retry_delay = retry_config.schedule();

View File

@@ -217,7 +217,7 @@ impl<DM: WriteNetDir> DirState for GetConsensusState<DM> {
     }
     fn dl_config(&self) -> Result<DownloadSchedule> {
         if let Some(wd) = Weak::upgrade(&self.writedir) {
-            Ok(*wd.config().schedule().retry_consensus())
+            Ok(wd.config().schedule().retry_consensus)
         } else {
             Err(Error::ManagerDropped)
         }
@@ -398,7 +398,7 @@ impl<DM: WriteNetDir> DirState for GetCertsState<DM> {
     }
     fn dl_config(&self) -> Result<DownloadSchedule> {
         if let Some(wd) = Weak::upgrade(&self.writedir) {
-            Ok(*wd.config().schedule().retry_certs())
+            Ok(wd.config().schedule().retry_certs)
         } else {
             Err(Error::ManagerDropped)
         }
@@ -791,7 +791,7 @@ impl<DM: WriteNetDir> DirState for GetMicrodescsState<DM> {
     }
     fn dl_config(&self) -> Result<DownloadSchedule> {
         if let Some(wd) = Weak::upgrade(&self.writedir) {
-            Ok(*wd.config().schedule().retry_microdescs())
+            Ok(wd.config().schedule().retry_microdescs)
         } else {
             Err(Error::ManagerDropped)
         }
@@ -1139,7 +1139,7 @@ mod test {
         // Download configuration is simple: only 1 request can be done in
         // parallel. It uses a consensus retry schedule.
         let retry = state.dl_config().unwrap();
-        assert_eq!(&retry, DownloadScheduleConfig::default().retry_consensus());
+        assert_eq!(retry, DownloadScheduleConfig::default().retry_consensus);
         // Do we know what we want?
         let docs = state.missing_docs();
@@ -1240,7 +1240,7 @@ mod test {
         let consensus_expires = datetime!(2020-08-07 12:43:20 UTC).into();
         assert_eq!(state.reset_time(), Some(consensus_expires));
         let retry = state.dl_config().unwrap();
-        assert_eq!(&retry, DownloadScheduleConfig::default().retry_certs());
+        assert_eq!(retry, DownloadScheduleConfig::default().retry_certs);
         // Bootstrap status okay?
         assert_eq!(
@@ -1366,7 +1366,7 @@ mod test {
             assert!(reset_time <= valid_until);
         }
         let retry = state.dl_config().unwrap();
-        assert_eq!(&retry, DownloadScheduleConfig::default().retry_microdescs());
+        assert_eq!(retry, DownloadScheduleConfig::default().retry_microdescs);
         assert_eq!(
             state.bootstrap_status().to_string(),
             "fetching microdescriptors (0/4)"